prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
---|---|---|
# coding: utf-8
# In[1]:
# This code can be downloaded as a Python script and run as:
# python full_vs_EM_any_dataset.py random_state dataset_name test_proportion val_proportion M_method M_alpha M_beta
# test_proportion: fraction of all the available true labels held out for the test set
# val_proportion: fraction of the remaining labelled training data held out for validation
def is_interactive():
import __main__ as main
return not hasattr(main, '__file__')
import sys
import argparse
import numpy
import matplotlib
import os
import glob
import pandas
import keras
from keras import backend as K
import matplotlib.pyplot as plt
from sklearn.preprocessing import label_binarize
from sklearn.utils import shuffle
from wlc.WLweakener import computeM, generateWeak, weak_to_index, binarizeWeakLabels
from experiments.visualizations import plot_history
from experiments.visualizations import plot_multilabel_scatter
cmap = plt.cm.get_cmap('tab20')
from experiments.utils import compute_friedmanchisquare
from experiments.utils import rankings_to_latex
dataset_name = 'mnist'
def statistical_tests(table, filename):
# Friedman test
ftest = compute_friedmanchisquare(table)
df_rankings = pandas.DataFrame(table.rank(axis=1).mean(axis=0).sort_index()).T
with open(filename + '.tex', 'w') as tf:
tf.write('''\\centering\n\\caption{{Average rankings. Friedman test {:.2f}, p-value
{:.2e}}}\n'''.format(ftest.statistic,
ftest.pvalue) +
df_rankings.to_latex(float_format='%.2f',
column_format='c'*(1 +
df_rankings.shape[1])))
def generate_summary(errorbar=True, zoom=False):
cmap = plt.cm.get_cmap('tab20')
from cycler import cycler
default_cycler = (cycler(color=['darkred', 'forestgreen', 'darkblue', 'violet', 'darkorange', 'saddlebrown']) +
cycler(linestyle=['-', '--', '-.', '-', '--', '-.']) +
cycler(marker=['o', 'v', 'x', '*', '+', '.']) +
cycler(lw=[2, 1.8, 1.6, 1.4, 1.2, 1]))
plt.rcParams['figure.figsize'] = (5, 2.5)
plt.rcParams["figure.dpi"] = 100
plt.rc('lines', linewidth=1)
plt.rc('axes', prop_cycle=default_cycler)
files_list = glob.glob("./Example_13*summary.csv")
print('List of files to aggregate')
print(files_list)
list_ = []
for file_ in files_list:
df =
|
pandas.read_csv(file_,index_col=0, header=None, quotechar='"')
|
pandas.read_csv
|
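For reference, the completion in this row is a plain pandas CSV load; a minimal, self-contained sketch, using a hypothetical file name, looks like the following.
import pandas

# Load a headerless CSV, taking the first column as the index; '"' is the quote character.
# "summary.csv" is a placeholder path used only for illustration.
df = pandas.read_csv("summary.csv", index_col=0, header=None, quotechar='"')
print(df.head())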
import pandas as pd
import datetime
def formatTopStocks(top):
top_data = {"code": [], "name": [], "increase": [], "price": [],
"totalCirculationValue": [], "volume": [], "mainNet": [],
"mainBuy": [], "mainSell": [], "concept": []}
for t in top:
top_data['code'].append(t[0])
top_data['name'].append(t[1])
top_data['increase'].append(t[3])
top_data['price'].append(t[2])
top_data['totalCirculationValue'].append(t[7])
top_data['volume'].append(t[4])
top_data['mainNet'].append(t[10])
top_data['mainBuy'].append(t[8])
top_data['mainSell'].append(t[9])
top_data['concept'].append(t[12])
df = pd.DataFrame(top_data)
return df
def plateData(data:list):
date_time = []
price = []
volume = []
date = str(datetime.datetime.now().date())
for d in data:
date_time.append(date + ' ' + d[0])
price.append(d[1])
volume.append(d[3])
data = {"time": date_time, "price": price, "volume": volume}
df = pd.DataFrame(data)
return df
def topPlateFormat(data:list):
topData = {"codes": [], "names": [], "increase": [],
"rateOfIncrease": [], "mainNet": [],
"mainBuy": [], "mainSell": [],
"totalCirculationValue": []}
for d in data:
topData["codes"].append(d[0])
topData["names"].append(d[1])
topData["increase"].append(d[3])
topData["rateOfIncrease"].append(d[4])
topData["mainNet"].append(d[6])
topData["mainBuy"].append(d[7])
topData["mainSell"].append(d[8])
topData["totalCirculationValue"].append(d[10])
df =
|
pd.DataFrame(topData)
|
pandas.DataFrame
|
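The completion here wraps the accumulated per-column lists in a DataFrame; a minimal sketch of the same dict-of-lists pattern, with made-up values, is shown below.
import pandas as pd

# Build a DataFrame from a dict of equal-length column lists,
# mirroring what formatTopStocks/topPlateFormat do above.
top_data = {"code": ["600000", "000001"], "name": ["A", "B"], "price": [10.5, 7.2]}
df = pd.DataFrame(top_data)
print(df)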
"""
An example of how to extract timeseries data from point locations
using WRF data stored in AWS.
Assumes that the `wrf-ak-ar5` S3 bucket is mounted at `~/wrf-ak-ar5`
Authors: <NAME> (<EMAIL>), SNAP
"""
import netCDF4  # prevent occasional obscure HDF5 issue with file locking on CentOS
# imports used by the extraction code below
import pandas as pd
import rasterio
import xarray as xr
from shapely.geometry import Point
# Setup logger to print to STDOUT
import logging
import sys
log = logging.getLogger()
log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
def extract_data():
"""
Open data set, perform extraction, save CSV.
"""
log.info('Loading dataset...')
dataset = xr.open_dataset('~/wrf-ak-ar5/hourly/GFDL-CM3/historical/t2/t2_hourly_wrf_GFDL-CM3_historical_1971.nc')
log.info('Dataset loaded, processing...')
res = 20000 # grid resolution for WRF data
# get an affine transform to make the point lookups faster
affine_dataset = rasterio.transform.from_origin(
dataset.xc.min()-(res/2),
dataset.yc.max()+(res/2),
res, res)
# point locations we are going to extract from the NetCDF file
# these locations are in WGS1984 EPSG:4326
location = {
'Fairbanks' : (-147.716, 64.8378),
'Greely' : (-145.6076, 63.8858),
'Whitehorse' : (-135.074, 60.727),
'Coldfoot' : (-150.1772, 67.2524)
}
# reproject the points to the wrf-polar-stereo using geopandas
location = {
location_name:Point(lng_lat) for location_name, lng_lat in location.items()
}
dataframe =
|
pd.Series(location)
|
pandas.Series
|
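The expected completion builds a Series keyed by location name; a minimal sketch using plain coordinate tuples (the shapely Points are omitted for brevity) follows.
import pandas as pd

# A Series built from a dict uses the dict keys as its index.
location = {"Fairbanks": (-147.716, 64.8378), "Whitehorse": (-135.074, 60.727)}
series = pd.Series(location)
print(series.index.tolist())  # ['Fairbanks', 'Whitehorse']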
# Libraries ###########################################################################################################
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn import preprocessing
import random
from sklearn.metrics import roc_curve, roc_auc_score
# Functions ###########################################################################################################
def apply_z_score(df, columns, index):
# Scale RNAseq data using z-scores
df = preprocessing.StandardScaler().fit_transform(df)
df = pd.DataFrame(df, columns=columns, index=index)
return df
def normalize_data(df, columns, index):
df = preprocessing.MinMaxScaler().fit_transform(df)
df =
|
pd.DataFrame(df, columns=columns, index=index)
|
pandas.DataFrame
|
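Because sklearn scalers return bare NumPy arrays, the completion re-wraps the scaled values so the original column names and index survive; a small self-contained sketch of that round trip:
import pandas as pd
from sklearn import preprocessing

df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 30.0]}, index=["x", "y", "z"])
scaled = preprocessing.MinMaxScaler().fit_transform(df)  # plain ndarray, labels are lost here
df_scaled = pd.DataFrame(scaled, columns=df.columns, index=df.index)  # labels restored
print(df_scaled)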
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import pandas
import matplotlib
import modin.pandas as pd
import io
from modin.pandas.test.utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
test_data_values,
test_data_keys,
create_test_dfs,
test_data,
)
from modin.config import NPartitions
NPartitions.put(4)
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
@pytest.mark.parametrize("method", ["items", "iteritems", "iterrows"])
def test_items_iteritems_iterrows(method):
data = test_data["float_nan_data"]
modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
for modin_item, pandas_item in zip(
getattr(modin_df, method)(), getattr(pandas_df, method)()
):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("name", [None, "NotPandas"])
def test_itertuples_name(name):
data = test_data["float_nan_data"]
modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
modin_it_custom = modin_df.itertuples(name=name)
pandas_it_custom = pandas_df.itertuples(name=name)
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
def test_itertuples_multiindex():
data = test_data["int_data"]
modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
new_idx = pd.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))]
)
modin_df.columns = new_idx
pandas_df.columns = new_idx
modin_it_custom = modin_df.itertuples()
pandas_it_custom = pandas_df.itertuples()
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
def test___iter__():
modin_df = pd.DataFrame(test_data_values[0])
pandas_df = pandas.DataFrame(test_data_values[0])
modin_iterator = modin_df.__iter__()
# Check that modin_iterator implements the iterator interface
assert hasattr(modin_iterator, "__iter__")
assert hasattr(modin_iterator, "next") or hasattr(modin_iterator, "__next__")
pd_iterator = pandas_df.__iter__()
assert list(modin_iterator) == list(pd_iterator)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___contains__(request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
result = False
key = "Not Ex<PASSWORD>"
assert result == modin_df.__contains__(key)
assert result == (key in modin_df)
if "empty_data" not in request.node.name:
result = True
key = pandas_df.columns[0]
assert result == modin_df.__contains__(key)
assert result == (key in modin_df)
def test__options_display():
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(1000, 102))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
pandas.options.display.max_rows = 10
pandas.options.display.max_columns = 10
x = repr(pandas_df)
pd.options.display.max_rows = 5
pd.options.display.max_columns = 5
y = repr(modin_df)
assert x != y
pd.options.display.max_rows = 10
pd.options.display.max_columns = 10
y = repr(modin_df)
assert x == y
# test for old fixed max values
pandas.options.display.max_rows = 75
pandas.options.display.max_columns = 75
x = repr(pandas_df)
pd.options.display.max_rows = 75
pd.options.display.max_columns = 75
y = repr(modin_df)
assert x == y
def test___finalize__():
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).__finalize__(None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___copy__(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy, pandas_df_copy = modin_df.__copy__(), pandas_df.__copy__()
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___deepcopy__(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy, pandas_df_copy = (
modin_df.__deepcopy__(),
pandas_df.__deepcopy__(),
)
df_equals(modin_df_copy, pandas_df_copy)
def test___repr__():
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(1000, 100))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(1000, 99))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(1000, 101))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(1000, 102))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
# ___repr___ method has a different code path depending on
# whether the number of rows is >60; and a different code path
# depending on the number of columns is >20.
# Previous test cases already check the case when cols>20
# and rows>60. The cases that follow exercise the other three
# combinations.
# rows <= 60, cols > 20
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(10, 100))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
# rows <= 60, cols <= 20
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(10, 10))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
# rows > 60, cols <= 20
frame_data = random_state.randint(RAND_LOW, RAND_HIGH, size=(100, 10))
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
assert repr(pandas_df) == repr(modin_df)
# Empty
pandas_df = pandas.DataFrame(columns=["col{}".format(i) for i in range(100)])
modin_df = pd.DataFrame(columns=["col{}".format(i) for i in range(100)])
assert repr(pandas_df) == repr(modin_df)
# From Issue #1705
string_data = """"time","device_id","lat","lng","accuracy","activity_1","activity_1_conf","activity_2","activity_2_conf","activity_3","activity_3_conf"
"2016-08-26 09:00:00.206",2,60.186805,24.821049,33.6080017089844,"STILL",75,"IN_VEHICLE",5,"ON_BICYCLE",5
"2016-08-26 09:00:05.428",5,60.192928,24.767222,5,"WALKING",62,"ON_BICYCLE",29,"RUNNING",6
"2016-08-26 09:00:05.818",1,60.166382,24.700443,3,"WALKING",75,"IN_VEHICLE",5,"ON_BICYCLE",5
"2016-08-26 09:00:15.816",1,60.166254,24.700671,3,"WALKING",75,"IN_VEHICLE",5,"ON_BICYCLE",5
"2016-08-26 09:00:16.413",5,60.193055,24.767427,5,"WALKING",85,"ON_BICYCLE",15,"UNKNOWN",0
"2016-08-26 09:00:20.578",3,60.152996,24.745216,3.90000009536743,"STILL",69,"IN_VEHICLE",31,"UNKNOWN",0"""
pandas_df = pandas.read_csv(io.StringIO(string_data))
modin_df = pd.read_csv(io.StringIO(string_data))
assert repr(pandas_df) == repr(modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_inplace_series_ops(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
if len(modin_df.columns) > len(pandas_df.columns):
col0 = modin_df.columns[0]
col1 = modin_df.columns[1]
pandas_df[col1].dropna(inplace=True)
modin_df[col1].dropna(inplace=True)
df_equals(modin_df, pandas_df)
pandas_df[col0].fillna(0, inplace=True)
modin_df[col0].fillna(0, inplace=True)
df_equals(modin_df, pandas_df)
def test___setattr__():
pandas_df = pandas.DataFrame([1, 2, 3])
modin_df = pd.DataFrame([1, 2, 3])
pandas_df.new_col = [4, 5, 6]
modin_df.new_col = [4, 5, 6]
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isin(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
val = [1, 2, 3, 4]
pandas_result = pandas_df.isin(val)
modin_result = modin_df.isin(val)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_constructor(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
df_equals(pandas_df, modin_df)
pandas_df = pandas.DataFrame({k: pandas.Series(v) for k, v in data.items()})
modin_df = pd.DataFrame({k: pd.Series(v) for k, v in data.items()})
df_equals(pandas_df, modin_df)
@pytest.mark.parametrize(
"data",
[
np.arange(1, 10000, dtype=np.float32),
[
pd.Series([1, 2, 3], dtype="int32"),
pandas.Series([4, 5, 6], dtype="int64"),
np.array([7, 8, 9], dtype=np.float32),
],
pandas.Categorical([1, 2, 3, 4, 5]),
],
)
def test_constructor_dtypes(data):
md_df, pd_df = create_test_dfs(data)
df_equals(md_df, pd_df)
def test_constructor_columns_and_index():
modin_df = pd.DataFrame(
[[1, 1, 10], [2, 4, 20], [3, 7, 30]],
index=[1, 2, 3],
columns=["id", "max_speed", "health"],
)
pandas_df = pandas.DataFrame(
[[1, 1, 10], [2, 4, 20], [3, 7, 30]],
index=[1, 2, 3],
columns=["id", "max_speed", "health"],
)
df_equals(modin_df, pandas_df)
df_equals(pd.DataFrame(modin_df), pandas.DataFrame(pandas_df))
df_equals(
pd.DataFrame(modin_df, columns=["max_speed", "health"]),
pandas.DataFrame(pandas_df, columns=["max_speed", "health"]),
)
df_equals(
pd.DataFrame(modin_df, index=[1, 2]),
pandas.DataFrame(pandas_df, index=[1, 2]),
)
df_equals(
pd.DataFrame(modin_df, index=[1, 2], columns=["health"]),
pandas.DataFrame(pandas_df, index=[1, 2], columns=["health"]),
)
df_equals(
pd.DataFrame(modin_df.iloc[:, 0], index=[1, 2, 3]),
pandas.DataFrame(pandas_df.iloc[:, 0], index=[1, 2, 3]),
)
df_equals(
pd.DataFrame(modin_df.iloc[:, 0], columns=["NO_EXIST"]),
|
pandas.DataFrame(pandas_df.iloc[:, 0], columns=["NO_EXIST"])
|
pandas.DataFrame
|
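This row exercises re-wrapping an existing frame through the DataFrame constructor; a minimal pandas-only sketch of the index/columns subsetting being compared against Modin:
import pandas as pd  # the same calls are mirrored with modin.pandas in the test above

df = pd.DataFrame(
    [[1, 1, 10], [2, 4, 20], [3, 7, 30]],
    index=[1, 2, 3],
    columns=["id", "max_speed", "health"],
)
# Passing an existing DataFrame together with index/columns reindexes to that subset.
sub = pd.DataFrame(df, index=[1, 2], columns=["health"])
print(sub)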
import os
import subprocess
import pandas as pd
import time
import numpy as np
from typing import List, Union, Tuple
import re
import itertools
from shutil import copyfile
import csv
from remote_que.logger import logger
from remote_que.config import QUE_FILE_HEADER, QUE_FILE_HEADER_TYPE
from remote_que.config import DEFAULT_EDITOR, QUE_FILE_HELP
from remote_que.config import get_que_file
from remote_que.config import get_started_file, get_running_file, get_crash_file, get_lock_file
from remote_que.config import get_finished_file, get_crash_start_file
from remote_que.config import DEFAULT_RESOURCE
from remote_que.utils import check_if_process_is_running
from remote_que.resource_management import ResourceAvailability
from remote_que.run_process import SingleMachineSlot
STATE_QUE = 0
STATE_CRASHED_START = 1
STATE_CRASHED = 1
STATE_STARTED = 1
STATE_RUNNING = 1
STATE_FINISHED = 1
def write_que_data(results_folder: str, que_data: pd.DataFrame) -> bool:
que_file = get_que_file(results_folder)
lock_file = get_lock_file(results_folder)
# Must have lock to write
if not os.path.isfile(lock_file):
return False
os.remove(lock_file)
que_data.to_csv(que_file, index=False)
# Generate new lock file
with open(lock_file, "w") as f:
f.write(str(time.time()))
return True
def edit_que_data(results_folder: str):
# First remove lock file if it exists (to block QueManager from reading new procs)
que_file = get_que_file(results_folder)
lock_file = get_lock_file(results_folder)
if os.path.isfile(lock_file):
os.remove(lock_file)
else:
return 1234
# -- Can open que file for edit now.
# If que does not exist, write header file
if not os.path.isfile(que_file):
with open(que_file, "w") as f:
f.write(QUE_FILE_HEADER)
original_que = read_remote_que(results_folder)
# Open default editor
return_code = subprocess.call(f"{DEFAULT_EDITOR} {que_file}", shell=True)
if return_code == 0:
# Try read row by row and validate, log not working rows and remove
try:
que_data = read_remote_que(results_folder)
except Exception:
return_code = 666
if return_code != 0:
logger.warning(f"[ERROR] An exception occurred when writing or reading QUE FILE "
f"(@ {que_file}). - Current edited file was writen (@ {que_file}_failed)\n"
f"--- REVERTING TO PREVIOUS QUE FILE ---\n"
f"[ERROR] Fix que file! (error code: {return_code})")
logger.info(QUE_FILE_HELP)
# Write current failed file to failed & rewrite old file
copyfile(que_file, que_file + "_failed")
# Write back old csv file
original_que.to_csv(que_file, index=False)
else:
# Validate que data. It was just written
# TODO validate data
# Run match special pattern and interpret
multiply = []
for que_idx, data in que_data.iterrows():
cmd = data["shell_command"]
repl_data = []
splits = []
split = cmd
while True:
match = re.search(r"\[{([^}]*)}\]", split)
if match is None:
break
interp = eval(match[1])
if not isinstance(interp, list):
interp = [interp]
repl_data.append(interp)
span = match.span()
splits.append(split[:span[0]])
split = split[span[1]:]
if len(repl_data) <= 0:
continue
cmds = []
for combination in itertools.product(*repl_data):
new_cmd = ""
for i, sp in enumerate(combination):
new_cmd += splits[i] + str(sp)
if len(combination) < len(splits):
new_cmd += splits[-1]
cmds.append(new_cmd)
multiply.append((que_idx, cmds))
# Append new commands
for que_idx, cmds in multiply:
for new_cmd in cmds:
new_idx = len(que_data)
que_data.loc[new_idx] = que_data.loc[que_idx]
que_data.loc[new_idx, "shell_command"] = new_cmd
# Remove multiplied indexes
for que_idx, _ in multiply:
que_data = que_data.drop(que_idx)
for que_idx, data in que_data.iterrows():
# Allocate new id to newly added command
if data["command_id"] == 0:
que_data.loc[que_idx, "command_id"] = int(time.time() * 1000)
time.sleep(0.1)
# Write preprocessed new data
que_data.to_csv(que_file, index=False)
logger.info(f"[DONE] New que saved! Here is the que sorted by priority:\n"
f"{que_data.sort_values('que_priority')}\n\n")
# Generate new lock file
with open(lock_file, "w") as f:
f.write(str(time.time()))
return return_code == 0
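# Illustrative aside (not part of the original remote_que tooling): the loop above expands
# commands containing "[{...}]" placeholders into one concrete command per value combination,
# e.g. "train.py --lr [{[0.1, 0.01]}] --seed [{[1, 2]}]" yields four commands. A hypothetical,
# standalone demo of that expansion idea (reusing re/itertools/List imported above; unlike the
# loop above it also re-appends the text that follows the last placeholder):
def _demo_expand_placeholders(cmd: str) -> List[str]:
    repl_data, splits, rest = [], [], cmd
    while True:
        match = re.search(r"\[{([^}]*)}\]", rest)
        if match is None:
            break
        values = eval(match[1])  # placeholder content is a Python literal, e.g. a list
        repl_data.append(values if isinstance(values, list) else [values])
        splits.append(rest[:match.span()[0]])
        rest = rest[match.span()[1]:]
    if not repl_data:
        return [cmd]
    return ["".join(s + str(v) for s, v in zip(splits, combo)) + rest
            for combo in itertools.product(*repl_data)]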
def read_remote_que(results_folder: str) -> pd.DataFrame:
que_file = get_que_file(results_folder)
return_code = 0
if not os.path.isfile(que_file):
return_code = 9
else:
header_columns = set(QUE_FILE_HEADER.split(","))
# read and validate line by line csv que
no_columns = len(header_columns)
try:
# Read text lines
with open(que_file, "r") as f:
que_lines = f.readlines()
correct_lines = []
correct_lines_data = []
blacklisted_lines = []
columns = None
for line in que_lines:
csv_interpret = list(csv.reader([line]))[0]
if columns is None:
if len(csv_interpret) == no_columns:
columns = csv_interpret
else:
# File is corrupt from header -> must delete all
blacklisted_lines = que_file
break
continue
# Validate types
valid = True
line_data = []
for i, (k, v) in enumerate(QUE_FILE_HEADER_TYPE.items()):
if v != str:
r = None
try:
r = eval(csv_interpret[i])
except Exception as e:
pass
if not isinstance(r, v):
valid = False
break
line_data.append(r)
else:
line_data.append(csv_interpret[i])
if valid:
correct_lines.append(line)
correct_lines_data.append(line_data)
else:
blacklisted_lines.append(line)
# Write blacklisted lines to crash_starts
if len(blacklisted_lines) > 0:
write_lines = "\n".join(blacklisted_lines) + "\n"
logger.warning(f"Cannot read lines: \n{write_lines}")
with open(get_crash_start_file(results_folder), "a") as f:
f.writelines(blacklisted_lines)
que_data =
|
pd.DataFrame(correct_lines_data, columns=columns)
|
pandas.DataFrame
|
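The completion rebuilds the que as a frame from the validated rows plus the header parsed out of the file; a minimal sketch of that row-wise construction pattern, with invented column names and rows:
import pandas as pd

columns = ["command_id", "que_priority", "shell_command"]  # hypothetical header
correct_lines_data = [[1, 0, "echo hello"], [2, 1, "echo world"]]
# Each inner list becomes one row, labelled by `columns`.
que_data = pd.DataFrame(correct_lines_data, columns=columns)
print(que_data)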
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
        # datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
        # timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
        # ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
        # iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
# DTA.__isub__ GH#43904
dta = dti._data.copy()
dta -= tdi
tm.assert_datetime_array_equal(dta, expected._data)
out = dti._data.copy()
np.subtract(out, tdi, out=out)
tm.assert_datetime_array_equal(out, expected._data)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract DatetimeArray from ndarray"
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi._values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
@pytest.mark.parametrize(
"addend",
[
datetime(2011, 1, 1),
DatetimeIndex(["2011-01-01", "2011-01-02"]),
DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
np.datetime64("2011-01-01"),
Timestamp("2011-01-01"),
],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
# GH#9631
dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add DatetimeArray and"
assert_cannot_add(dtarr, addend, msg)
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
dta = dti.array
result = dta - dti
expected = dti - dti
tm.assert_index_equal(result, expected)
tdi = result
result = dta + tdi
expected = dti + tdi
tm.assert_index_equal(result, expected)
result = dta - tdi
expected = dti - tdi
tm.assert_index_equal(result, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
        # return subtraction -> TimedeltaIndex (GH ...)
dti = date_range("20130101", periods=3)
dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
msg = "DatetimeArray subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dti_tz - dti
with pytest.raises(TypeError, match=msg):
dti - dti_tz
with pytest.raises(TypeError, match=msg):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range("20130101", periods=3)
dti2 = date_range("20130101", periods=4)
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
expected = TimedeltaIndex(["1 days", np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------------
# TODO: Most of this block is moved from series or frame tests, needs
# cleanup, box-parametrization, and de-duplication
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array):
ser = Series(
[
Timestamp("20130301"),
Timestamp("20130228 23:00:00"),
Timestamp("20130228 22:00:00"),
Timestamp("20130228 21:00:00"),
]
)
obj = box_with_array(ser)
intervals = ["D", "h", "m", "s", "us"]
def timedelta64(*args):
# see casting notes in NumPy gh-12927
return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))
for d, h, m, s, us in product(*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
lhs = op(obj, nptd)
rhs = op(obj, pytd)
tm.assert_equal(lhs, rhs)
def test_ops_nat_mixed_datetime64_timedelta64(self):
# GH#11349
timedelta_series = Series([NaT, Timedelta("1s")])
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(
datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
)
tm.assert_series_equal(
datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_datetime,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
timedelta_series - single_nat_dtype_datetime
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_datetime,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_datetime + nat_series_dtype_timedelta,
nat_series_dtype_timestamp,
)
def test_ufunc_coercions(self):
idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
delta = np.timedelta64(1, "D")
exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
# When adding/subtracting an ndarray (which has no .freq), the result
# does not infer freq
idx = idx._with_freq(None)
delta = np.array(
[np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
)
exp = DatetimeIndex(["2011-01-02", "2011-01-05", "2011-01-08"], name="x")
for result in [idx + delta, np.add(idx, delta)]:
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
exp =
|
DatetimeIndex(["2010-12-31", "2011-01-01", "2011-01-02"], name="x")
|
pandas.DatetimeIndex
|
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
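# Illustrative note (not from the original test module, values shown for context):
# requesting this fixture with np.int32, for example, resolves to roughly
#   (np.int32, -2147483648, 2147483647)
# since integer dtypes use np.iinfo bounds while float dtypes fall back to np.finfo.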
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
result = df.groupby("b")["c"].min()
expected.index.name = "b"
tm.assert_series_equal(result, expected)
def test_intercept_builtin_sum():
s = Series([1.0, 2.0, np.nan, 3.0])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(
result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)),
)
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
|
tm.assert_frame_equal(result, expected, check_dtype=False)
|
pandas._testing.assert_frame_equal
|
"""
Utility functions for working with DataFrames
"""
import pandas as pd
import numpy as np
TEST_DF = pd.DataFrame([1,2,3])
def date_splitter(df):
df[:5]
df['year'] = df['date'].dt.year
df['month'] = df['date'].dt.month
df['day'] = df['date'].dt.day
df['hour'] = df['date'].dt.hour
df['minute'] = df['date'].dt.minute
df_new=
|
pd.DataFrame(df)
|
pandas.DataFrame
|
from pydrive.auth import GoogleAuth
import io
from pydrive.drive import GoogleDrive
import datetime
from datetime import timedelta
import email, smtplib, ssl
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import pandas as pd
import pytz
gauth = GoogleAuth()
# get previous auth credentials if available. This prevents the need to re-auth the script with Google.
# If there is no credentials.txt, a local webserver and browser launch to perform the auth with Google.
gauth.LoadCredentialsFile("credentials.txt")
if gauth.credentials is None:
gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
gauth.Refresh()
else:
gauth.Authorize()
gauth.SaveCredentialsFile("credentials.txt")
drive = GoogleDrive(gauth)
# get the file lists and information from the following folders.
folder1 = drive.ListFile({'q': "'--folderid--' in parents and trashed=false"}).GetList()
folder2 = drive.ListFile({'q': "'--folderid--' in parents and trashed=false"}).GetList()
folder3 = drive.ListFile({'q': "'--folderid--' in parents and trashed=false"}).GetList()
# Create dataframes from the dicts created by reading the folder contents
df1 = pd.DataFrame.from_dict(folder1)
df2 = pd.DataFrame.from_dict(folder2)
df3 = pd.DataFrame.from_dict(folder3)
new = pd.concat([df1,df2,df3], axis = 0, sort = False)
new.reset_index()
new['modifiedDate']=
|
pd.to_datetime(new['modifiedDate'])
|
pandas.to_datetime
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
import pandas as pd
import pdb
class Evaluator:
def __init__(self, gold_standard_file = None, sep='\t', interaction_label='regulator-target', node_list=None, subnet_dict=None):
if (gold_standard_file is None) and (subnet_dict is not None):
self.gs_flat = pd.Series(subnet_dict['true_edges'])
self.full_list = pd.Series(subnet_dict['edges'])
elif gold_standard_file is not None:
self.gs_file = gold_standard_file
self.gs_data = pd.read_csv(gold_standard_file, sep=sep, header=None)
self.gs_data.columns = ['regulator','target','exists']
self.gs_data['regulator-target'] = list(zip(self.gs_data.regulator, self.gs_data.target))
self.interaction_label = interaction_label
self.gs_flat = self.gs_data[self.gs_data['exists'] > 0]['regulator-target']
self.gs_neg = self.gs_data[self.gs_data['exists'] == 0]['regulator-target']
#ecoli has a unique gold standard file
if 'ecoli' in self.gs_file:
self.regulators = ["G"+str(x) for x in range(1,335)]
self.targets = ["G"+str(x) for x in range(1,4512)]
self.full_list = tuple(map(tuple,self.possible_edges(np.array(self.regulators),np.array(self.targets))))
elif 'omranian' in self.gs_file:
with open('../../data/invitro/omranian_parsed_tf_list.tsv', 'r') as f:
self.regulators = f.read().splitlines()
with open('../../data/invitro/omranian_all_genes_list.tsv', 'r') as f:
self.targets = f.read().splitlines()
self.full_list = tuple(map(tuple, self.possible_edges(np.array(self.regulators), np.array(self.targets))))
elif 'dream5' in self.gs_file:
with open('../../data/dream5/insilico_transcription_factors.tsv', 'r') as f:
self.regulators = f.read().splitlines()
fp = '../../data/dream5/insilico_timeseries.tsv'
df = pd.read_csv(fp, sep='\t')
geneids = df.columns.tolist()
geneids.pop(0)
self.targets = geneids
self.full_list = tuple(map(tuple, self.possible_edges(np.array(self.regulators), np.array(self.targets))))
elif node_list:
all_regulators = np.array(list(set(node_list)))
self.full_list = tuple(map(tuple,self.possible_edges(all_regulators,all_regulators)))
else:
#more robust version of defining the full list
all_regulators = self.gs_data['regulator'].unique().tolist()
all_targets = self.gs_data['target'].unique().tolist()
all_regulators.extend(all_targets)
all_regulators = np.array(list(set(all_regulators)))
self.full_list = tuple(map(tuple,self.possible_edges(all_regulators,
all_regulators)))
#remove self edges
self.full_list = [ x for x in self.full_list if x[0] != x[1] ]
self.full_list = pd.Series(self.full_list)
def possible_edges(self,parents, children):
"""
Create a list of all the possible edges between parents and children
:param parents: array
labels for parents
:param children: array
labels for children
:return: array, length = parents * children
array of parent, child combinations for all possible edges
"""
parent_index = range(len(parents))
child_index = range(len(children))
a, b = np.meshgrid(parent_index, child_index)
parent_list = parents[a.flatten()]
child_list = children[b.flatten()]
possible_edge_list = np.array(list(zip(parent_list, child_list)))
return possible_edge_list
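    # Illustrative sketch (not from the original class): possible_edges forms the
    # cartesian product of parent and child labels via np.meshgrid, e.g. for a
    # hypothetical Evaluator instance `ev`:
    #   >>> ev.possible_edges(np.array(['A', 'B']), np.array(['x', 'y']))
    #   array([['A', 'x'], ['B', 'x'], ['A', 'y'], ['B', 'y']], dtype='<U1')
    # The ordering follows meshgrid's flattening of the parent/child index grids.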
def create_link_list(self,df, w):
parent_names = df.index.values
child_names = df.columns.values
edges = self.possible_edges(parent_names, child_names)
parents = edges[:, 0]
children = edges[:, 1]
directed_edges = df.values.flatten()
all_edges = np.abs(directed_edges)
ll_array = [parents, children, list(zip(parents, children)), directed_edges, all_edges, w]
link_list =
|
pd.DataFrame(ll_array)
|
pandas.DataFrame
|
import os
import pandas as pd
def bea_use(data_dir):
from .parse import bea_use
data_dir = os.path.join(data_dir, "windc_2_0_1", "BEA", "IO")
df = []
for i in dir(bea_use):
if callable(getattr(bea_use, i)):
df.append(getattr(bea_use, i)(data_dir))
df =
|
pd.concat(df, ignore_index=True)
|
pandas.concat
|
from re import split
import joblib
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
from sklearn.tree import DecisionTreeClassifier
from sb_capstone.shaping import (
_simplify_gender,
_transform_age_group,
_transform_generation,
_explode_membership_date,
_extract_age_bins,
_transform_gender
)
select_model = joblib.load("../models/select_offer.pkl")
receive_model = joblib.load("../models/receive_offer.pkl")
def train_receive_offer(data, file):
"""Trains data to create model to determine if a customer will receive an offer.
Args:
data (pandas.DataFrame): Data to train model on.
file (str): File to save model to.
Returns:
str: File where the model is saved.
dict: Classification report.
"""
y = data.purchased
X = data.drop(columns=["purchased"])
X_train, X_test, y_train, y_test = train_test_split(X, y)
clf = DecisionTreeClassifier(criterion="gini", splitter="random")
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
score = classification_report(y_test, y_pred, zero_division=True, output_dict=True)
joblib.dump(clf, file)
return file, score
def train_select_offer(data, file):
"""Trains data to create model to determine which offers to show to a customer.
Args:
data (pandas.DataFrame): Data to train model on.
file (str): File to save model to.
Returns:
str: File where the model is saved.
dict: Classification report.
"""
y_cols = np.arange(1, 11).astype(str).tolist()
y = data[y_cols]
X = data[data.columns[~data.columns.isin(y_cols)]]
X_train, X_test, y_train, y_test = train_test_split(X, y)
clf = MultiOutputClassifier(
DecisionTreeClassifier(criterion="gini", splitter="random"),
)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
score = classification_report(y_test, y_pred, zero_division=True, output_dict=True)
joblib.dump(clf, file)
return file, score
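# Hedged usage sketch (editorial example, not from the original module): both
# trainers take a prepared feature DataFrame plus an output path and return
# (saved_path, classification_report_dict), e.g.
#   >>> path, report = train_receive_offer(prepared_df, "receive_offer.pkl")
#   >>> report["weighted avg"]["f1-score"]
# where `prepared_df` is a hypothetical DataFrame already shaped like the
# training data (a `purchased` column plus feature columns).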
def _convert_for_select(profile):
"""Convert profile to be fed into the select model.
Args:
profile (pandas.DataFrame): Profile to convert.
Returns:
pandas.DataFrame: Converted profile.
"""
without_profile = profile[profile.age.isna()].reset_index(drop=True)
profile = profile[~profile.age.isna()].reset_index(drop=True)
profile = _simplify_gender(
_explode_membership_date(profile))
return profile, without_profile
def select_offer(profile, model = select_model, default_offers = []):
"""Predict which offers to show to a customer.
Args:
profile (pandas.DataFrame): Profile to predict offers for.
        model: Trained scikit-learn classifier used to predict offers.
        default_offers (list): Default offers to show to customers who are anonymous.
Returns:
pandas.DataFrame: Profile with offers.
"""
profile, without_profile = _convert_for_select(profile)
offer_cols = np.arange(1, 11).astype(str).tolist()
profile[offer_cols] = np.zeros(10, dtype=int).tolist()
if len(profile) > 0:
cols = [
"gender",
"age",
"income",
"membership_year",
"membership_month",
"membership_day"
]
y = pd.DataFrame(model.predict(profile[cols]), columns=offer_cols)
profile[offer_cols] = y
profile = profile[["id"] + offer_cols]
profile = pd.melt(profile, id_vars="id", value_vars=np.arange(1, 11).astype(str).tolist(), var_name="recommended_offers")
profile = profile[profile.value == 1]
profile = profile.groupby("id").agg({"recommended_offers": lambda x: x.tolist()}).reset_index()
without_profile["recommended_offers"] = [default_offers] * without_profile.shape[0]
without_profile = without_profile[["id", "recommended_offers"]]
results =
|
pd.concat([profile, without_profile])
|
pandas.concat
|
from single_bet_type_analyzer import SingleBetTypeAnalyzer
from dask.distributed import Client, LocalCluster
import pandas as pd
class EnsembleBetAnalyzer:
def __init__(self, cluster=None, live=True, offline=True, headless=True):
if cluster is None:
self.cluster = LocalCluster(processes=False)
else:
self.cluster = cluster
self.client = Client(self.cluster)
# futures = [
# self.client.submit(SingleBetTypeAnalyzer, 'calcio', '1x2', self.cluster),
# self.client.submit(SingleBetTypeAnalyzer, 'calcio', 'uo1.5', self.cluster),
# self.client.submit(SingleBetTypeAnalyzer, 'calcio', 'uo2.5', self.cluster),
# self.client.submit(SingleBetTypeAnalyzer, 'calcio', 'uo3.5', self.cluster),
# self.client.submit(SingleBetTypeAnalyzer, 'calcio', 'uo4.5', self.cluster),
# self.client.submit(SingleBetTypeAnalyzer, 'basket', '12', self.cluster),
# self.client.submit(SingleBetTypeAnalyzer, 'tennis', '12', self.cluster),
# ]
# self.analyzers = [f.result() for f in futures]
self.analyzers = [
SingleBetTypeAnalyzer('calcio', '1x2', self.cluster, live=live, offline=offline, headless=headless),
# SingleBetTypeAnalyzer('calcio', 'uo1.5', self.cluster, live=live, offline=offline, headless=headless),
SingleBetTypeAnalyzer('calcio', 'uo2.5', self.cluster, live=live, offline=offline, headless=headless),
# SingleBetTypeAnalyzer('calcio', 'uo3.5', self.cluster, live=live, offline=offline, headless=headless),
# SingleBetTypeAnalyzer('calcio', 'uo4.5', self.cluster, live=live, offline=offline, headless=headless),
# SingleBetTypeAnalyzer('basket', '12', self.cluster, live=live, offline=offline, headless=headless),
SingleBetTypeAnalyzer('tennis', '12', self.cluster, live=live, offline=offline, headless=headless),
]
def close(self):
[analyzer.close() for analyzer in self.analyzers]
self.client.close()
self.cluster.close()
def analyze_bets(self):
results = [analyzer.analyze_bets() for analyzer in self.analyzers]
df =
|
pd.concat(results)
|
pandas.concat
|
#!/usr/bin/env python
import copy
import gzip
import logging
import multiprocessing
import os
import random
import time
import traceback
from collections import defaultdict
import numpy as np
import pandas as pd
import pysam
from Bio import SeqIO
from numba import jit
from tqdm import tqdm
import inStrain.logUtils
import inStrain.profile.fasta
import inStrain.controller
global i2o
global v2o
i2o = {'nm':0, 'insert_distance':1, 'mapq':2, 'length':3, 'reads':4, 'start':5, 'stop':6}
v2o = {'min_read_ani':0, 'max_insert':1, 'min_insert':2, 'min_mapq':3}
class Controller():
def main_from_profile(self, ISP, bam, **kwargs):
'''
The main method when called from the "profile" module
Args:
ISP = pre-initialized inStrain profile
bam = location of .bam file
args = the rest of the command line arguments
Returns:
            Rdic = dictionary of read -> mismatches
RR = pandas dataframe describing filtering
'''
detailed_report = kwargs.get('detailed_mapping_info', False)
# Set up and parse .fasta file
inStrain.logUtils.log_checkpoint("FilterReads", "load_fasta", "start")
fasta_db, scaff2sequence, s2l = inStrain.profile.fasta.load_fasta(**kwargs)
scaffolds = list(fasta_db['scaffold'].unique())
inStrain.logUtils.log_checkpoint("FilterReads", "load_fasta", "end")
inStrain.controller.patch_mp_connection_bpo_17560()
# Filter the reads and store read reports
if detailed_report:
Rdic, RR, dRR = load_paired_reads(bam, scaffolds, **kwargs)
# Store and delete the detailed report
ISP.store('detailed_mapping_info', dRR, 'pandas', "Details report on reads")
del dRR
else:
Rdic, RR = load_paired_reads(bam, scaffolds, **kwargs)
# Return the Rdic and ReadReport
return Rdic, RR, fasta_db, scaff2sequence, s2l
def main(self, args):
'''
The main method when called explicitly (as its own module)
'''
bam = args.bam
vargs = vars(args)
del vargs['bam']
detailed_report = vargs.get('detailed_mapping_info', False)
generate_sam = vargs.get('generate_sam', False)
out_folder = vargs.get('output', False)
# Set up the output folder
if not os.path.isdir(out_folder):
os.mkdir(out_folder)
# Set up .fasta file
FAdb, s2s = load_fasta(args.fasta)
# Get the paired reads
scaffolds = list(FAdb['scaffold'].unique())
if detailed_report:
Rdic, RR, dRR = load_paired_reads(bam, scaffolds, **vargs)
else:
Rdic, RR = load_paired_reads(bam, scaffolds, **vargs)
dRR = None
# Make a .sam
if generate_sam:
print("The ability to make .sam files is not finished yet; sorry!")
# Save results
self.write_results(out_folder, RR, dRR, **vargs)
def write_results(self, out_folder, RR, dRR, **kwargs):
'''
Save the results in a folder for the filter_reads module
'''
assert os.path.isdir(out_folder)
RR_loc = os.path.join(out_folder, 'mapping_info.csv')
write_mapping_info(RR, RR_loc, **kwargs)
if dRR is not None:
RR_loc = os.path.join(out_folder, 'detailed_mapping_info.csv')
dRR.to_csv(RR_loc, index=False, sep='\t')
def read_profile_worker(read_cmd_queue, read_result_queue, bam, single_thread=False):
'''
Worker to filter reads
'''
# Apply patch
inStrain.controller.patch_mp_connection_bpo_17560()
    # Initialize the .bam file
bam_init = samfile = pysam.AlignmentFile(bam)
while True:
if not single_thread:
cmds = read_cmd_queue.get(True)
else:
try:
cmds = read_cmd_queue.get(timeout=5)
except:
return
dicts, log = scaffold_profile_wrapper(cmds, bam_init)
read_result_queue.put((dicts, log))
# Clean up memory
for d in dicts:
del d
del log
del dicts
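# Hedged sketch (editorial example, not inStrain's actual call site): the worker
# above is meant to be driven by multiprocessing queues, roughly like
#   >>> cmd_q, res_q = multiprocessing.Queue(), multiprocessing.Queue()
#   >>> p = multiprocessing.Process(target=read_profile_worker,
#   ...                             args=(cmd_q, res_q, "reads.bam"))
#   >>> p.start()
# after which command batches are pushed onto cmd_q and (dicts, log) tuples are
# read off res_q; "reads.bam" is a placeholder path.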
def load_paired_reads(bam, scaffolds, **kwargs):
'''
Load paired reads to be profiled
    This method does a lot of things in one place because these intermediate objects take a lot of RAM, and we want them cleared as soon as possible
Return a dictionary of results. Some things that could be in it are:
pair2infoF: A filtered dictionary of read name to number of mismatches
        RR: A summary read report
RR_detailed: A detailed read report
'''
# Parse the kwargs
detailed_report = kwargs.get('detailed_mapping_info', False)
priority_reads_loc = kwargs.get('priority_reads', None)
# Establish tallys to keep track of numbers
tallys = {}
# Get the pairs
inStrain.logUtils.log_checkpoint("FilterReads", "get_paired_reads_multi", "start")
scaff2pair2info = get_paired_reads_multi(bam, scaffolds, **kwargs)
inStrain.logUtils.log_checkpoint("FilterReads", "get_paired_reads_multi", "end")
if detailed_report:
dRR = make_detailed_mapping_info(scaff2pair2info)
# Handle paired-read filtering
inStrain.logUtils.log_checkpoint("FilterReads", "paired_reads", "start")
priority_reads = load_priority_reads(priority_reads_loc)
scaff2pair2info = paired_read_filter(scaff2pair2info, priority_reads_set=priority_reads, tallys=tallys, **kwargs)
inStrain.logUtils.log_checkpoint("FilterReads", "paired_reads", "end")
# Filter and make the report
inStrain.logUtils.log_checkpoint("FilterReads", "filter_reads", "start")
scaff2pair2infoF, RR = filter_scaff2pair2info(scaff2pair2info, tallys,
priority_reads_set=priority_reads,
**kwargs)
inStrain.logUtils.log_checkpoint("FilterReads", "filter_reads", "end")
if detailed_report:
return scaff2pair2infoF, RR, dRR
else:
return scaff2pair2infoF, RR
def filter_scaff2pair2info(scaff2pair2info, tallys={}, priority_reads_set=set(), **kwargs):
'''
Filter scaff2pair2info and generate a read report
'''
# Set up priority reads
assert type(kwargs.get('priority_reads', 'none')) != type(set())
priority_reads = priority_reads_set
assert type(priority_reads) == type(set()), type(priority_reads)
#item2order, to make it easier to read
i2o = {'nm':0, 'insert_distance':1, 'mapq':2, 'length':3, 'reads':4, 'start':5, 'stop':6}
# Calculate max insert
max_insert_relative = kwargs.get('max_insert_relative', 3)
median_insert = np.median([value[i2o['insert_distance']] for scaff, pair2info \
in scaff2pair2info.items() for pair, value in pair2info.items()\
if value[i2o['reads']] == 2])
max_insert = median_insert * max_insert_relative
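    # Worked example (hypothetical numbers): with the default max_insert_relative of 3,
    # a median insert size of 300 bp gives max_insert = 900 bp, so pairs with a larger
    # insert fail the max_insert filter applied below.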
# Get filter values
values = {}
values['min_mapq'] = kwargs.get('min_mapq', 2)
values['max_insert'] = max_insert
values['min_insert'] = kwargs.get('min_insert', 50)
values['min_read_ani'] = kwargs.get('min_read_ani', 0.97)
values['pairing_filter'] = kwargs.get('pairing_filter', 'paired_only')
# Set up the filtered dictionary
scaff2pair2mm = {}
# Make tallys for individual scaffolds
table = defaultdict(list)
for scaff, pair2info in scaff2pair2info.items():
# Do the tallys
if scaff not in tallys:
tallys[scaff] = defaultdict(int)
# Initialize some columns
for c in ["pass_pairing_filter", "pass_min_read_ani", "pass_max_insert",
"pass_min_insert", "pass_min_mapq", "filtered_pairs",
"filtered_singletons", "filtered_priority_reads"]:
tallys[scaff][c] = 0
scaff2pair2mm[scaff] = {}
for pair, info in pair2info.items():
update_tallys(tallys, pair, info, values, scaff, scaff2pair2mm, priority_reads)
# Make into a table
table['scaffold'].append(scaff)
for key, value in tallys[scaff].items():
table[key].append(value)
if len(pair2info.keys()) > 0:
# Do the means
for i, att in enumerate(['mistmaches', 'insert_distance', 'mapq_score', 'pair_length']):
table['mean_' + att].append(np.mean([info[i] for pair, info in pair2info.items()]))
table['mean_PID'].append(np.mean([(1 - (float(info[i2o['nm']]) /
float(info[i2o['length']]))) for pair, info in pair2info.items()]))
            # Do the medians
table['median_insert'].append(np.median([info[i2o['insert_distance']] for pair, info in pair2info.items()]))
else:
for att in ['mistmaches', 'insert_distance', 'mapq_score', 'pair_length']:
table['mean_' + att].append(np.nan)
table['mean_PID'].append(np.nan)
table['median_insert'].append(np.nan)
try:
Adb = pd.DataFrame(table)
except:
for k, v in table.items():
print(k, len(v))
assert False
# Make tallys for all scaffolds
table = defaultdict(list)
table['scaffold'].append('all_scaffolds')
CAdb = Adb[Adb['pass_pairing_filter'] > 0]
total_reads = CAdb['pass_pairing_filter'].sum()
for c in list(Adb.columns):
if c == 'scaffold':
pass
elif c.startswith('mean_'):
table[c].append(sum([v * m for v, m in zip(CAdb[c],
CAdb['pass_pairing_filter'])])/total_reads)
elif c.startswith('median_'):
table[c].append(sum([v * m for v, m in zip(CAdb[c],
CAdb['pass_pairing_filter'])])/total_reads)
else:
table[c].append(int(CAdb[c].sum()))
adb = pd.DataFrame(table)
# Concat
Rdb = pd.concat([adb, Adb]).reset_index(drop=True)
return scaff2pair2mm, Rdb
# def update_tallys(tallys, pair, info, values, scaffold, scaff2pair2mm, priority_reads):
# '''
# The meat of filter_scaff2pair2info
# '''
# # Evaluate this pair
# tallys[scaffold]['pass_pairing_filter'] += 1
# f_results = evaluate_pair(info, values)
#
# # Tally the results for what filters passed
# for name, index in v2o.items():
# tallys[scaffold]['pass_' + name] += f_results[index]
#
# # Tally the results for if the whole pair passed
# if f_results.sum() == 4:
# tallys[scaffold]['filtered_pairs'] += 1
# scaff2pair2mm[scaffold][pair] = info[0]
#
# if info[i2o['reads']] == 1:
# tallys[scaffold]['filtered_singletons'] += 1
#
# if pair in priority_reads:
# tallys[scaffold]['filtered_priority_reads'] += 1
#
#
# i2o = {'nm':0, 'insert_distance':1, 'mapq':2, 'length':3, 'reads':4, 'start':5, 'stop':6}
# v2o = {'min_read_ani':0, 'max_insert':1, 'min_insert':2, 'min_mapq':3}
# def evaluate_pair(info, values):
# '''
# Return a list of the filters that this pair passes and fails
# Arguments:
# info: np array listing info about this pair in the i2o order
# values: dictionary listing the filters to use when evaluating this pair
# Returns:
# f_results: np array listing which filters pass (1) and fail (0) in v2o order
# '''
# # Initialize results for this pair
# f_results = np.zeros(4)
#
# # Handle PID
# PID = 1 - (float(info[i2o['nm']]) / float(info[i2o['length']]))
# if PID > values['min_read_ani']:
# f_results[v2o['min_read_ani']] = 1
#
# # Handle mapQ
# if info[i2o['mapq']] > values['min_mapq']:
# f_results[v2o['min_mapq']] = 1
#
# # If this is a pair check insert distance:
# if ((info[i2o['reads']] == 2) & (info[i2o['insert_distance']] != -1)):
# if info[i2o['insert_distance']] > values['min_insert']:
# f_results[v2o['min_insert']] = 1
# if info[i2o['insert_distance']] < values['max_insert']:
# f_results[v2o['max_insert']] = 1
#
# # Otherwise give those a pass
# else:
# f_results[v2o['min_insert']] = 1
# f_results[v2o['max_insert']] = 1
#
# return f_results
def update_tallys(tallys, pair, info, values, scaffold, scaff2pair2mm, priority_reads):
'''
The meat of filter_scaff2pair2info
'''
# Evaluate this pair
tallys[scaffold]['pass_pairing_filter'] += 1
f_results = evaluate_pair(info, np.zeros(4), values['min_read_ani'], values['min_mapq'], values['min_insert'],
values['max_insert'])
    # Tally the results for which filters passed
for name, index in v2o.items():
tallys[scaffold]['pass_' + name] += f_results[index]
# Tally the results for if the whole pair passed
if f_results.sum() == 4:
tallys[scaffold]['filtered_pairs'] += 1
scaff2pair2mm[scaffold][pair] = info[0]
if info[i2o['reads']] == 1:
tallys[scaffold]['filtered_singletons'] += 1
if pair in priority_reads:
tallys[scaffold]['filtered_priority_reads'] += 1
@jit(nopython=True)
def evaluate_pair(info, f_results, min_read_ani, min_mapq, min_insert, max_insert):
'''
Return a list of the filters that this pair passes and fails
    Arguments:
        info: np array listing info about this pair in the i2o order
        f_results: pre-allocated length-4 np array that the results are written into
        min_read_ani, min_mapq, min_insert, max_insert: scalar filter thresholds
    Returns:
        f_results: np array listing which filters pass (1) and fail (0) in v2o order
i2o = {'nm':0, 'insert_distance':1, 'mapq':2, 'length':3, 'reads':4, 'start':5, 'stop':6}
v2o = {'min_read_ani':0, 'max_insert':1, 'min_insert':2, 'min_mapq':3}
'''
# Initialize results for this pair
#f_results = np.zeros(4)
# Handle PID
PID = 1 - (float(info[0]) / float(info[3]))
if PID > min_read_ani:
f_results[0] = 1
# Handle mapQ
if info[2] > min_mapq:
f_results[3] = 1
# If this is a pair check insert distance:
if ((info[4] == 2) & (info[1] != -1)):
if info[1] > min_insert:
f_results[2] = 1
if info[1] < max_insert:
f_results[1] = 1
# Otherwise give those a pass
else:
f_results[1] = 1
f_results[2] = 1
return f_results
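# Illustrative sketch (not part of the original module): shows the argument
# layout evaluate_pair expects. The read metrics and thresholds below are
# hypothetical values chosen only to demonstrate the i2o / v2o ordering.
def _example_evaluate_pair():
    # info follows i2o: nm, insert_distance, mapq, length, reads, start, stop
    example_info = np.array([2, 300, 30, 250, 2, 0, 250])
    # the returned array follows v2o: min_read_ani, max_insert, min_insert, min_mapq
    return evaluate_pair(example_info, np.zeros(4), 0.97, 2, 50, 900)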
def load_priority_reads(file_loc):
'''
Loads a file of reads and returns a set of their names
'''
# is it None?
if file_loc is None:
return set()
# Is it zipped?
if file_loc[-3:] == '.gz':
reader = gzip.open(file_loc, 'rt')
else:
reader = open(file_loc, 'r')
# Figure out the type
for line in reader.readlines():
if line[0] == '@':
TYPE = 'fastq'
else:
TYPE = 'list'
break
reader.close()
if file_loc[-3:] == '.gz':
reader = gzip.open(file_loc, 'rt')
else:
reader = open(file_loc, 'r')
reads = set()
if TYPE == 'fastq':
for line in reader.readlines():
if line[0] != '@':
continue
reads.add(line[1:].strip())
elif TYPE == 'list':
for line in reader.readlines():
reads.add(line.strip())
reader.close()
return reads
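# Usage sketch (hypothetical file names): the priority reads file may either be
# a plain text file with one read name per line, or an (optionally gzipped)
# fastq, in which case names are taken from the '@' header lines.
#
#   priority_reads = load_priority_reads("priority_reads.txt")
#   priority_reads = load_priority_reads("priority_reads.fastq.gz")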
def paired_read_filter(scaff2pair2info, priority_reads_set=set(), tallys=None, **kwargs):
'''
Filter scaff2pair2info to keep / remove paired / unpaired reads
'''
assert type(kwargs.get('priority_reads', 'none')) != type(set())
priority_reads = priority_reads_set
pairing_filter = kwargs.get('pairing_filter', 'paired_only')
scaff2pair2infoF = {}
pair2scaffold = {}
assert type(priority_reads) == type(set()), type(priority_reads)
for scaff, p2i in scaff2pair2info.items():
        # Initialize this scaffold
scaff2pair2infoF[scaff] = {}
if tallys is not None:
tallys[scaff] = defaultdict(int)
for v in ['unfiltered_reads', 'unfiltered_pairs', 'unfiltered_singletons',
'unfiltered_priority_reads']:
tallys[scaff][v] = 0
for p, i in p2i.items():
# Update tallys; info[4] = number of reads
if tallys is not None:
tallys[scaff]['unfiltered_reads'] += i[4]
if i[4] == 2:
tallys[scaff]['unfiltered_pairs'] += 1
if i[4] == 1:
tallys[scaff]['unfiltered_singletons'] += 1
if p in priority_reads:
tallys[scaff]['unfiltered_priority_reads'] += 1
# Determine if it's going to make it into the final set
if pairing_filter == 'paired_only':
if ((i[4] == 2) | (p in priority_reads)):
scaff2pair2infoF[scaff][p] = i
elif pairing_filter == 'non_discordant':
# Add it if it's not already in there
if ((p not in pair2scaffold) | (p in priority_reads)):
scaff2pair2infoF[scaff][p] = i
pair2scaffold[p] = scaff
# If it is already in there, that means its concordant, so delete it
else:
del scaff2pair2infoF[pair2scaffold[p]][p]
elif pairing_filter == 'all_reads':
if p in pair2scaffold:
# Average the pairs
mi = _merge_info(i, scaff2pair2infoF[pair2scaffold[p]][p])
scaff2pair2infoF[scaff][p] = mi
scaff2pair2infoF[pair2scaffold[p]][p] = mi
else:
pair2scaffold[p] = scaff
scaff2pair2infoF[scaff][p] = i
else:
logging.error("Do not know paired read filter \"{0}\"; crashing now".format(pairing_filter))
raise Exception
return scaff2pair2infoF
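# Illustrative sketch (hypothetical data): paired_read_filter expects a nested
# mapping of scaffold -> read pair name -> info array in i2o order
# (nm, insert_distance, mapq, length, reads, start, stop).
#
#   scaff2pair2info = {'scaffold_1': {'pair_1': np.array([2, 300, 30, 250, 2, 0, 250])}}
#   kept = paired_read_filter(scaff2pair2info, pairing_filter='paired_only')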
def _merge_info(i1, i2):
#{'nm':0, 'insert_distance':1, 'mapq':2, 'length':3, 'reads':4, 'start':5, 'stop':6}
return np.array([i1[0] + i2[0],
-2,
                     max([i1[2], i2[2]]),  # keep the better mapq of the two records
i1[3] + i2[3],
i1[4] + i2[4],
-1,
-1], dtype="int64")
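# Illustrative sketch (hypothetical arrays): under the 'all_reads' pairing
# filter, a pair seen on two scaffolds is merged with _merge_info. Mismatches,
# lengths and read counts are summed; insert distance and positions are
# invalidated (-2 / -1); the better mapq of the two records is kept.
#
#   merged = _merge_info(np.array([2, 300, 30, 250, 1, 0, 250]),
#                        np.array([1, 300, 20, 250, 1, 300, 550]))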
def make_detailed_mapping_info(scaff2pair2info, pairTOinfo=None, version=2):
'''
Make a detailed pandas dataframe from pair2info
'''
if pairTOinfo is None:
pairTOinfo = dict()
if version == 2:
i2o = {'mm':0, 'insert_dist':1, 'mapq':2, 'length':3, 'reads':4,
'start':5, 'stop':6}
elif version == 1:
i2o = {'mm':0, 'insert_dist':1, 'mapq':2, 'length':3,}
keepers = pairTOinfo.keys()
report_keepers = (len(keepers) > 0)
table = defaultdict(list)
for scaff, pair2info in scaff2pair2info.items():
for pair, array in pair2info.items():
table['read_pair'].append(pair)
table['scaffold'].append(scaff)
if report_keepers:
table['pass_filters'].append(pair in keepers)
for item, location in i2o.items():
table[item].append(array[location])
return pd.DataFrame(table)
def load_fasta(fasta_file):
'''
Load the sequences to be profiled
Return a table listing scaffold name, start, end
'''
# PROFILE ALL SCAFFOLDS IN THE .FASTA FILE
scaff2sequence = SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta")) # set up .fasta file
s2l = {s:len(scaff2sequence[s]) for s in list(scaff2sequence.keys())} # Get scaffold2length
Fdb = pd.DataFrame(list(s2l.items()), columns=['scaffold', 'end'])
Fdb['start'] = 0
return Fdb, scaff2sequence # also return s2l - alexcc 5/9/2019: Nah, make it scaff2sequence (s2s) (M.O. 6/10/19)
def filter_paired_reads_dict2(pair2info, **kwargs):
'''
Filter the dictionary of paired reads, end with read -> mm
'''
i2o = {'nm':0, 'insert_distance':1, 'mapq':2, 'length':3, 'reads':4, 'start':5, 'stop':6}
# Get kwargs
min_read_ani = kwargs.get('min_read_ani', 0.97)
max_insert_relative = kwargs.get('max_insert_relative', 3)
min_insert = kwargs.get('min_insert', 50)
min_mapq = kwargs.get('min_mapq', 2)
# Get max insert
max_insert = np.median([value[1] for key, value in pair2info.items() if value[i2o['reads']] == 2]) * max_insert_relative
# Return dictionary of pairs
return {copy.deepcopy(key):copy.deepcopy(value[0])
for key, value in pair2info.items() if _evaluate_pair2(value,
min_read_ani=min_read_ani,
max_insert=max_insert,
min_insert=min_insert,
min_mapq=min_mapq)}
def makeFilterReport2(scaff2pair2info, pairTOinfo=False, priority_reads_set=None, **kwargs):
'''
Make a scaffold-level report on when reads are filtered using get_paired_reads_multi2
If you've already filtered out pairs as you'd like, pass in pairTOinfo
'''
if priority_reads_set is None:
priority_reads_set = set()
assert type(kwargs.get('priority_reads', 'none')) != type(set())
priority_reads = priority_reads_set
profile_scaffolds = kwargs.get('scaffold_level_mapping_info', None)
#item2order
i2o = {'nm':0, 'insert_distance':1, 'mapq':2, 'length':3, 'reads':4, 'start':5, 'stop':6}
# Calculate max insert
max_insert_relative = kwargs.get('max_insert_relative', 3)
median_insert = np.median([value[i2o['insert_distance']] for scaff, pair2info in scaff2pair2info.items() for pair, value in pair2info.items() if value[i2o['reads']] == 2])
max_insert = median_insert * max_insert_relative
# Get values
values = {}
values['min_read_ani'] = kwargs.get('min_read_ani', 0.97)
values['max_insert'] = max_insert
values['min_insert'] = kwargs.get('min_insert', 50)
values['min_mapq'] = kwargs.get('min_mapq', 2)
# Make report on all scaffolds
logging.debug('running on all reads')
table = defaultdict(list)
table['scaffold'].append('all_scaffolds')
table['unfiltered_reads'].append(sum([value[i2o['reads']] for scaff, pair2info in scaff2pair2info.items() for pair, value in pair2info.items()]))
table['unfiltered_pairs'].append(len([True for scaff, pair2info in scaff2pair2info.items() for pair, value in pair2info.items() if value[i2o['reads']] == 2]))
table['unfiltered_singletons'].append(len([True for scaff, pair2info in scaff2pair2info.items() for pair, value in pair2info.items() if (value[i2o['reads']] == 1)]))
table['unfiltered_priority_reads'].append(len([True for scaff, pair2info in scaff2pair2info.items() for pair, value in pair2info.items() if (pair in priority_reads)]))
if pairTOinfo != False:
keepers = set(pairTOinfo.keys())
infos = [info for scaff, pair2info in scaff2pair2info.items() for pair, info in pair2info.items() if pair in keepers]
table['pass_pairing_filter'].append(len(infos))
else:
infos = [info for scaff, pair2info in scaff2pair2info.items() for pair, info in pair2info.items()]
for att, v in values.items():
kwargs={att:v}
table['pass_' + att].append(len([True for info in infos if (_evaluate_pair2(info, **kwargs))]))
table['filtered_pairs'].append(len([True for info in infos if (_evaluate_pair2(info, **values))]))
table['filtered_singletons'].append(len([True for info in infos if ((info[i2o['reads']] == 1) & (_evaluate_pair2(info, **values)))]))
table['filtered_priority_reads'].append(len([True for scaff, pair2info in scaff2pair2info.items() for pair, info in pair2info.items() if ((pair in priority_reads) & (_evaluate_pair2(info, **values)))]))
for i, att in enumerate(['mistmaches', 'insert_distance', 'mapq_score', 'pair_length']):
table['mean_' + att].append(np.mean([info[i] for info in infos]))
table['median_insert'].append(np.median([info[i2o['insert_distance']] for info in infos]))
table['mean_PID'].append(np.mean([(1 - (float(info[i2o['nm']]) / float(info[i2o['length']]))) for info in infos]))
Adb = pd.DataFrame(table)
table = defaultdict(list)
logging.debug('running on individual scaffolds')
for scaff, pair2info in scaff2pair2info.items():
table['scaffold'].append(scaff)
if pairTOinfo != False:
            # keepers = set(pairTOinfo.keys())  # already calculated above; no need to compute it twice
infos = [info for pair, info in pair2info.items() if pair in keepers]
table['pass_pairing_filter'].append(len(infos))
else:
infos = [info for pair, info in pair2info.items()]
table['filtered_pairs'].append(len([True for info in infos if (_evaluate_pair2(info, **values))]))
if profile_scaffolds == True:
table['unfiltered_reads'].append(sum([value[i2o['reads']] for pair, value in pair2info.items()]))
table['unfiltered_pairs'].append(len([True for pair, value in pair2info.items() if value[i2o['reads']] == 2]))
table['unfiltered_singletons'].append(len([True for pair, info in pair2info.items() if (info[i2o['reads']] == 1)]))
table['unfiltered_priority_reads'].append(len([True for pair, info in pair2info.items() if (pair in priority_reads)]))
for att, v in values.items():
kwargs={att:v}
table['pass_' + att].append(len([True for info in infos if (_evaluate_pair2(info, **kwargs))]))
table['filtered_singletons'].append(len([True for info in infos if ((info[i2o['reads']] == 1) & (_evaluate_pair2(info, **values)))]))
table['filtered_priority_reads'].append(len([True for pair, info in pair2info.items() if ((pair in priority_reads) & (_evaluate_pair2(info, **values)))]))
for i, att in enumerate(['mistmaches', 'insert_distance', 'mapq_score', 'pair_length']):
table['mean_' + att].append(np.mean([info[i] for pair, info in pair2info.items()]))
table['median_insert'].append(np.median([value[1] for key, value in pair2info.items()]))
table['mean_PID'].append(np.mean([(1 - (float(info[i2o['nm']]) / float(info[i2o['length']]))) for pair, info in pair2info.items()]))
    Sdb = pd.DataFrame(table)
"""
Created on Wed Feb 27 15:12:14 2019
@author: cwhanse
"""
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from datetime import datetime
import pytz
import pytest
from solarforecastarbiter.validation import validator
import pvlib
from pvlib.location import Location
@pytest.fixture
def irradiance_QCRad():
output = pd.DataFrame(
columns=['ghi', 'dhi', 'dni', 'solar_zenith', 'dni_extra',
'ghi_limit_flag', 'dhi_limit_flag', 'dni_limit_flag',
'consistent_components', 'diffuse_ratio_limit'],
data=np.array([[-100, 100, 100, 30, 1370, 0, 1, 1, 0, 0],
[100, -100, 100, 30, 1370, 1, 0, 1, 0, 0],
[100, 100, -100, 30, 1370, 1, 1, 0, 0, 1],
[1000, 100, 900, 0, 1370, 1, 1, 1, 1, 1],
[1000, 200, 800, 15, 1370, 1, 1, 1, 1, 1],
[1000, 200, 800, 60, 1370, 0, 1, 1, 0, 1],
[1000, 300, 850, 80, 1370, 0, 0, 1, 0, 1],
[1000, 500, 800, 90, 1370, 0, 0, 1, 0, 1],
[500, 100, 1100, 0, 1370, 1, 1, 1, 0, 1],
[1000, 300, 1200, 0, 1370, 1, 1, 1, 0, 1],
[500, 600, 100, 60, 1370, 1, 1, 1, 0, 0],
[500, 600, 400, 80, 1370, 0, 0, 1, 0, 0],
[500, 500, 300, 80, 1370, 0, 0, 1, 1, 1],
[0, 0, 0, 93, 1370, 1, 1, 1, 0, 0]]))
dtypes = ['float64', 'float64', 'float64', 'float64', 'float64',
'bool', 'bool', 'bool', 'bool', 'bool']
for (col, typ) in zip(output.columns, dtypes):
output[col] = output[col].astype(typ)
return output
def test_check_ghi_limits_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
ghi_out_expected = expected['ghi_limit_flag']
ghi_out = validator.check_ghi_limits_QCRad(expected['ghi'],
expected['solar_zenith'],
expected['dni_extra'])
assert_series_equal(ghi_out, ghi_out_expected)
def test_check_dhi_limits_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
dhi_out_expected = expected['dhi_limit_flag']
dhi_out = validator.check_dhi_limits_QCRad(expected['dhi'],
expected['solar_zenith'],
expected['dni_extra'])
assert_series_equal(dhi_out, dhi_out_expected)
def test_check_dni_limits_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
dni_out_expected = expected['dni_limit_flag']
dni_out = validator.check_dni_limits_QCRad(expected['dni'],
expected['solar_zenith'],
expected['dni_extra'])
assert_series_equal(dni_out, dni_out_expected)
def test_check_irradiance_limits_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
ghi_out_expected = expected['ghi_limit_flag']
ghi_out, dhi_out, dni_out = validator.check_irradiance_limits_QCRad(
expected['solar_zenith'], expected['dni_extra'], ghi=expected['ghi'])
assert_series_equal(ghi_out, ghi_out_expected)
assert dhi_out is None
assert dni_out is None
dhi_out_expected = expected['dhi_limit_flag']
ghi_out, dhi_out, dni_out = validator.check_irradiance_limits_QCRad(
expected['solar_zenith'], expected['dni_extra'], ghi=expected['ghi'],
dhi=expected['dhi'])
assert_series_equal(dhi_out, dhi_out_expected)
dni_out_expected = expected['dni_limit_flag']
ghi_out, dhi_out, dni_out = validator.check_irradiance_limits_QCRad(
expected['solar_zenith'], expected['dni_extra'],
dni=expected['dni'])
assert_series_equal(dni_out, dni_out_expected)
def test_check_irradiance_consistency_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
cons_comp, diffuse = validator.check_irradiance_consistency_QCRad(
expected['ghi'], expected['solar_zenith'], expected['dni_extra'],
expected['dhi'], expected['dni'])
assert_series_equal(cons_comp, expected['consistent_components'])
assert_series_equal(diffuse, expected['diffuse_ratio_limit'])
@pytest.fixture
def weather():
output = pd.DataFrame(columns=['air_temperature', 'wind_speed',
'relative_humidity',
'extreme_temp_flag', 'extreme_wind_flag',
'extreme_rh_flag'],
data=np.array([[-40, -5, -5, 0, 0, 0],
[10, 10, 50, 1, 1, 1],
[140, 55, 105, 0, 0, 0]]))
dtypes = ['float64', 'float64', 'float64', 'bool', 'bool', 'bool']
for (col, typ) in zip(output.columns, dtypes):
output[col] = output[col].astype(typ)
return output
def test_check_temperature_limits(weather):
expected = weather
result_expected = expected['extreme_temp_flag']
result = validator.check_temperature_limits(expected['air_temperature'])
assert_series_equal(result, result_expected)
def test_check_wind_limits(weather):
expected = weather
result_expected = expected['extreme_wind_flag']
result = validator.check_wind_limits(expected['wind_speed'])
assert_series_equal(result, result_expected)
def test_check_rh_limits(weather):
expected = weather
data = expected['relative_humidity']
result_expected = expected['extreme_rh_flag']
result = validator.check_rh_limits(data)
result.name = 'extreme_rh_flag'
assert_series_equal(result, result_expected)
def test_check_ac_power_limits():
index = pd.date_range(
start='20200401 0700', freq='2h', periods=6, tz='UTC')
power = pd.Series([0, -0.1, 0.1, 1, 1.1, -0.1], index=index)
day_night = pd.Series([0, 0, 0, 1, 1, 1], index=index, dtype='bool')
capacity = 1.
expected = pd.Series([1, 0, 0, 1, 0, 0], index=index).astype(bool)
out = validator.check_ac_power_limits(power, day_night, capacity)
assert_series_equal(out, expected)
def test_check_dc_power_limits():
index = pd.date_range(
start='20200401 0700', freq='2h', periods=6, tz='UTC')
power = pd.Series([0, -0.1, 0.1, 1, 1.3, -0.1], index=index)
day_night = pd.Series([0, 0, 0, 1, 1, 1], index=index, dtype='bool')
capacity = 1.
expected = pd.Series([1, 0, 0, 1, 0, 0], index=index).astype(bool)
out = validator.check_dc_power_limits(power, day_night, capacity)
assert_series_equal(out, expected)
def test_check_limits():
# testing with input type Series
expected = pd.Series(data=[True, False])
data = pd.Series(data=[3, 2])
result = validator._check_limits(val=data, lb=2.5)
assert_series_equal(expected, result)
result = validator._check_limits(val=data, lb=3, lb_ge=True)
assert_series_equal(expected, result)
data = pd.Series(data=[3, 4])
result = validator._check_limits(val=data, ub=3.5)
assert_series_equal(expected, result)
result = validator._check_limits(val=data, ub=3, ub_le=True)
assert_series_equal(expected, result)
result = validator._check_limits(val=data, lb=3, ub=4, lb_ge=True,
ub_le=True)
assert all(result)
result = validator._check_limits(val=data, lb=3, ub=4)
assert not any(result)
with pytest.raises(ValueError):
validator._check_limits(val=data)
@pytest.fixture
def location():
return Location(latitude=35.05, longitude=-106.5, altitude=1619,
name="Albuquerque", tz="MST")
@pytest.fixture
def times():
MST = pytz.timezone('MST')
return pd.date_range(start=datetime(2018, 6, 15, 12, 0, 0, tzinfo=MST),
end=datetime(2018, 6, 15, 13, 0, 0, tzinfo=MST),
freq='10T')
def test_check_ghi_clearsky(mocker, location, times):
clearsky = location.get_clearsky(times)
# modify to create test conditions
ghi = clearsky['ghi'].copy()
ghi.iloc[0] *= 0.5
ghi.iloc[-1] *= 2.0
clear_times = np.tile(True, len(times))
clear_times[-1] = False
expected = pd.Series(index=times, data=clear_times)
result = validator.check_ghi_clearsky(ghi, clearsky['ghi'])
assert_series_equal(result, expected)
def test_check_poa_clearsky(mocker, times):
dt = pd.date_range(start=datetime(2019, 6, 15, 12, 0, 0),
freq='15T', periods=5)
poa_global = pd.Series(index=dt, data=[800, 1000, 1200, -200, np.nan])
poa_clearsky = pd.Series(index=dt, data=1000)
result = validator.check_poa_clearsky(poa_global, poa_clearsky)
expected = pd.Series(index=dt, data=[True, True, False, True, False])
assert_series_equal(result, expected)
result = validator.check_poa_clearsky(poa_global, poa_clearsky, kt_max=1.2)
expected = pd.Series(index=dt, data=[True, True, True, True, False])
assert_series_equal(result, expected)
def test_check_day_night():
MST = pytz.timezone('MST')
times = [datetime(2018, 6, 15, 12, 0, 0, tzinfo=MST),
datetime(2018, 6, 15, 22, 0, 0, tzinfo=MST)]
expected = pd.Series(data=[True, False], index=times)
solar_zenith = pd.Series(data=[11.8, 114.3], index=times)
result = validator.check_day_night(solar_zenith)
    assert_series_equal(result, expected)
import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from kartothek.core.common_metadata import make_meta, read_schema_metadata
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.uuid import gen_uuid
from kartothek.io.eager import (
create_empty_dataset_header,
store_dataframes_as_dataset,
write_single_partition,
)
from kartothek.io.testing.write import * # noqa: F40
from kartothek.io_components.metapartition import MetaPartition
def _store_dataframes(dfs, **kwargs):
# Positional arguments in function but `None` is acceptable input
for kw in ("dataset_uuid", "store"):
if kw not in kwargs:
kwargs[kw] = None
return store_dataframes_as_dataset(dfs=dfs, **kwargs)
@pytest.fixture()
def bound_store_dataframes():
return _store_dataframes
def test_write_single_partition(store_factory, mock_uuid, metadata_version):
create_empty_dataset_header(
store=store_factory(),
        schema=pd.DataFrame({"col": [1]})
#! /usr/bin/env python3
"""
Copyright 2021 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#
# Classify applications into 104 classes given their raw code.
#
# The representation (graph) is created from IR.
#
import os
import sys
import glob
import pandas as pd
import pickle as pk
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from stellargraph import StellarDiGraph
from absl import app, flags, logging
from yacos.info import compy as R
from yacos.info.compy.extractors import LLVMDriver
def extract_graph_data(graph, graph_type):
"""Extract edges, nodes and embeddings."""
nodes = {}
#nodes['word2vec'] = graph.get_nodes_word2vec_embeddings('ir')
nodes['histogram'] = graph.get_nodes_histogram_embeddings('ir')
nodes['inst2vec'] = graph.get_nodes_inst2vec_embeddings()
nodes['ir2vec'] = graph.get_nodes_ir2vec_embeddings()
nodes['opcode'] = graph.get_nodes_opcode_embeddings()
edges = graph.get_edges_str_dataFrame()
return edges, nodes
def execute(argv):
"""Extract a graph representation."""
del argv
FLAGS = flags.FLAGS
    # Verify dataset directory.
if not os.path.isdir(FLAGS.dataset_directory):
logging.error('Dataset directory {} does not exist.'.format(
FLAGS.dataset_directory)
)
sys.exit(1)
"""Extract the representation from the source code."""
# Instantiate the LLVM driver.
driver = LLVMDriver()
# Define the builder
builder = R.LLVMGraphBuilder(driver)
# Define the visitor
visitors = {
# CFG
'cfg_call': R.LLVMCFGCallVisitor,
'cfg_call_nr': R.LLVMCFGCallNoRootVisitor,
'cfg_call_compact_me': R.LLVMCFGCallCompactMultipleEdgesVisitor,
'cfg_call_compact_se': R.LLVMCFGCallCompactSingleEdgeVisitor,
'cfg_call_compact_me_nr': R.LLVMCFGCallCompactMultipleEdgesNoRootVisitor,
'cfg_call_compact_se_nr': R.LLVMCFGCallCompactSingleEdgeNoRootVisitor,
# CDFG
'cdfg_call': R.LLVMCDFGCallVisitor,
'cdfg_call_nr': R.LLVMCDFGCallNoRootVisitor,
'cdfg_call_compact_me': R.LLVMCDFGCallCompactMultipleEdgesVisitor,
'cdfg_call_compact_se': R.LLVMCDFGCallCompactSingleEdgeVisitor,
'cdfg_call_compact_me_nr': R.LLVMCDFGCallCompactMultipleEdgesNoRootVisitor,
'cdfg_call_compact_se_nr': R.LLVMCDFGCallCompactSingleEdgeNoRootVisitor,
# CDFG PLUS
'cdfg_plus': R.LLVMCDFGPlusVisitor,
'cdfg_plus_nr': R.LLVMCDFGPlusNoRootVisitor,
# PROGRAML
'programl': R.LLVMProGraMLVisitor,
'programl_nr': R.LLVMProGraMLNoRootVisitor
}
folders = [
os.path.join(FLAGS.dataset_directory, subdir)
for subdir in os.listdir(FLAGS.dataset_directory)
if os.path.isdir(os.path.join(FLAGS.dataset_directory, subdir))
]
idx = FLAGS.dataset_directory.rfind('/')
last_folder = FLAGS.dataset_directory[idx+1:]
# Load data from all folders
for folder in folders:
sources = glob.glob('{}/*.ll'.format(folder))
for source in sources:
try:
extractionInfo = builder.ir_to_info(source)
graph = builder.info_to_representation(extractionInfo,
visitors[FLAGS.graph])
edges, nodes_data = extract_graph_data(graph, FLAGS.graph)
except Exception:
logging.error('Error {}.'.format(source))
continue
for feat, feat_data in nodes_data.items():
indexes = []
embeddings = []
for idx, _, emb in feat_data:
indexes.append(idx)
embeddings.append(emb)
                nodes = pd.DataFrame(embeddings, index=indexes)
# Haybaler
# <NAME>, Nov 2020 - April 2021
# Combine your Wochenende .bam.txt or reporting output from multiple samples into one matrix per stat.
# Usage: bash run_haybaler.sh
import pandas as pd
import click
import os
import re
version = "0.30 - April 2021"
# changelog
# 0.30 read all samples in one call. Filter out taxa with values below a readcount and RPMM limit
# 0.23 improve file input and arg handling
# 0.22 bugfix, correct gc_ref and chr_length for new chromosomes
# 0.21 fix ordering problems
# 0.20 add find_order and sort_new functions, so taxa with highest readcounts come first
# 0.11 add heatmap prep and R scripts
# 0.10 initial commits, improvements, testing
def read_csv(filename, filepath):
return pd.read_csv(filepath + '/' + filename, decimal=",", index_col=0)
def txt_to_df(filename, filepath):
with open(filepath + '/' + filename) as infile, open('tmp.csv', 'w') as outfile:
# add column names (not given in txt.file), save new file as temp outfile
outfile.write("species,chr_length,read_count,unmapped_read_segments\n")
# replace tabs with comma(tab separated to comma separated)
for line in infile:
outfile.write(" ".join(line.split()).replace(' ', ','))
outfile.write("\n")
file = pd.read_csv("tmp.csv", decimal=",", index_col=0)
if os.path.exists("tmp.csv"): # del tmp file outfile
os.remove("tmp.csv")
del file['unmapped_read_segments'] # unneeded column?
return file
def join_dfs(file, name, path, column, input_name):
sample = (input_name[:input_name.find(".")]) # shorten sample name
sub_df = file[[column]].copy() # new df with just the wanted column
sub_df = sub_df.rename(columns={column: sample}) # rename column to sample name
if os.path.isfile(path + "/" + column + "_" + name): # if the file for the wanted stat already exists
old = pd.read_csv(path + "/" + column + "_" + name, decimal=",", index_col=0, sep='\t')
old.fillna(0.0, inplace=True)
if sample not in old.columns: # no double samples
new_chr = [
chromosome
for chromosome in file.index
if chromosome not in old.index
]
# get a df with the chr_length and gc_ref from the new chromosomes
if 'gc_ref' in file:
new_chr_df = file.loc[new_chr, ['chr_length', 'gc_ref']]
else:
new_chr_df = file.loc[new_chr, ['chr_length']]
old = old.append(new_chr_df) # append the df with chr_length and gc_ref to the old df
new = pd.concat([old, sub_df], axis=1, sort=False) # add the new column to the old df
if 'gc_ref' not in new and 'gc_ref' in file:
gc = file[['gc_ref']].copy()
new = pd.concat([new, gc], axis=1, sort=False)
tmp = new['gc_ref'].to_list()
del new['gc_ref']
new.insert(1, 'gc_ref', tmp)
else:
new = old
else:
if 'gc_ref' in file:
new = file[['chr_length', 'gc_ref', column]].copy()
else:
new = file[['chr_length', column]].copy()
new = new.rename(columns={column: sample})
new.fillna(0.0, inplace=True)
new = new.astype(float)
new = new.round(2)
return new
# calculate in which order the organisms should be in the output files.
# the organism with the most read count in all samples should come first
def find_order(df):
samples = [
column
for column in df.columns
if column not in ['chr_length', 'gc_ref']
]
    sum_organisms = []  # list of the sum from all samples for each organism (row sums)
for organism in df.index:
tmp_organism = [float(df.at[organism, column]) for column in samples]
sum_organisms.append(sum(tmp_organism))
df['sum_organisms'] = sum_organisms # add a column with the sums to the df
df = df.sort_values(by='sum_organisms', ascending=False) # sort the df by the sums
df = df.drop(['sum_organisms'], axis=1) # delete the column with the sums
order = df.index
return df, order
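# Illustrative sketch (not part of the original script): hypothetical sample
# columns showing how find_order ranks taxa by their summed read counts.
def _example_find_order():
    df = pd.DataFrame({'chr_length': [100.0, 200.0],
                       'sample1': [5.0, 50.0],
                       'sample2': [1.0, 10.0]},
                      index=['taxonA', 'taxonB'])
    ordered_df, order = find_order(df)
    # taxonB (row sum 60) now comes before taxonA (row sum 6)
    return ordered_df, order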
# sort the new df so it fits the previous calculated order
def sort_new(df, order):
    order_df = pd.DataFrame(index=order)  # create an empty order_df with the correctly ordered organisms as index
    return pd.concat([order_df, df], axis=1, sort=False)
import numpy as np
from numpy.fft import fft, ifft
# from: http://www.mirzatrokic.ca/FILES/codes/fracdiff.py
# small modification: wrapped 2**np.ceil(...) around int()
# https://github.com/SimonOuellette35/FractionalDiff/blob/master/question2.py
_default_thresh = 1e-4
def get_weights(d, size):
"""Expanding window fraction difference weights."""
w = [1.0]
for k in range(1, size):
w_ = -w[-1] / k * (d - k + 1)
w.append(w_)
w = np.array(w[::-1]).reshape(-1, 1)
return w
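# Worked example (values rounded): for d = 0.5 and size = 4 the recursion
# w_k = -w_{k-1} * (d - k + 1) / k gives 1, -0.5, -0.125, -0.0625, which is
# returned oldest-weight-first as [[-0.0625], [-0.125], [-0.5], [1.0]].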
import numba
@numba.njit
def get_weights_ffd(d, thres, lim=99999):
"""Fixed width window fraction difference weights.
Set lim to be large if you want to only stop at thres.
Set thres to be zero if you want to ignore it.
"""
w = [1.0]
k = 1
for i in range(1, lim):
w_ = -w[-1] / k * (d - k + 1)
if abs(w_) < thres:
break
w.append(w_)
k += 1
w = np.array(w[::-1]).reshape(-1, 1)
return w
def frac_diff_ffd(x, d, thres=_default_thresh, lim=None):
assert isinstance(x, np.ndarray)
assert x.ndim == 1
if lim is None:
lim = len(x)
w, out = _frac_diff_ffd(x, d, lim, thres=thres)
# print(f'weights is shape {w.shape}')
return out
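# Usage sketch (hypothetical series): fractionally difference a random walk
# with a fixed-width window; the first len(weights) - 1 entries come back as NaN.
def _example_frac_diff_ffd():
    x = np.cumsum(np.random.randn(500))
    return frac_diff_ffd(x, d=0.4)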
# this method was not faster
# def frac_diff_ffd_stride_tricks(x, d, thres=_default_thresh):
# """d is any positive real"""
# assert isinstance(x, np.ndarray)
# w = get_weights_ffd(d, thres, len(x))
# width = len(w) - 1
# output = np.empty(len(x))
# output[:width] = np.nan
# output[width:] = np.dot(np.lib.stride_tricks.as_strided(x, (len(x) - width, len(w)), (x.itemsize, x.itemsize)), w[:,0])
# return output
@numba.njit
def _frac_diff_ffd(x, d, lim, thres=_default_thresh):
"""d is any positive real"""
w = get_weights_ffd(d, thres, lim)
width = len(w) - 1
output = []
output.extend([np.nan] * width) # the first few entries *were* zero, should be nan?
for i in range(width, len(x)):
output.append(np.dot(w.T, x[i - width: i + 1])[0])
return w, np.array(output)
def fast_frac_diff(x, d):
"""expanding window version using fft form"""
assert isinstance(x, np.ndarray)
T = len(x)
np2 = int(2 ** np.ceil(np.log2(2 * T - 1)))
k = np.arange(1, T)
b = (1,) + tuple(np.cumprod((k - d - 1) / k))
z = (0,) * (np2 - T)
z1 = b + z
z2 = tuple(x) + z
dx = ifft(fft(z1) * fft(z2))
return np.real(dx[0:T])
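# Sanity-check sketch (hypothetical data): with d = 1 the expanding-window
# weights reduce to (1, -1, 0, ...), so apart from the first element the
# result matches an ordinary first difference.
def _example_fast_frac_diff():
    x = np.random.randn(50)
    out = fast_frac_diff(x, d=1)
    return np.allclose(out[1:], np.diff(x))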
# TESTS
def test_all():
for d in [0.3, 1, 1.5, 2, 2.5]:
test_fast_frac_diff_equals_fracDiff_original_impl(d=d)
test_frac_diff_ffd_equals_original_impl(d=d)
# test_frac_diff_ffd_equals_prado_original(d=d) # his implementation is busted for fractional d
# def test_frac_diff_ffd_equals_prado_original(d=3):
# # ignore this one for now as Prado's version does not work
# from .prado_orig import fracDiff_FFD_prado_original
# import pandas as pd
#
# x = np.random.randn(100)
# a = frac_diff_ffd(x, d, thres=_default_thresh)
# b = fracDiff_FFD_prado_original(pd.DataFrame(x), d, thres=_default_thresh)
# b = np.squeeze(b.values)
# a = a[d:] # something wrong with the frac_diff_ffd gives extra entries of zero
# assert np.allclose(a, b)
# # return locals()
def test_frac_diff_ffd_equals_original_impl(d=3):
from .prado_orig import fracDiff_FFD_original_impl
import pandas as pd
x = np.random.randn(100)
a = frac_diff_ffd(x, d, thres=_default_thresh)
    b = fracDiff_FFD_original_impl(pd.DataFrame(x), d, thres=_default_thresh)
import bct
import numpy as np
import pandas as pd
from my_settings import (source_folder, results_path)
subjects = [
"0008", "0009", "0010", "0012", "0013", "0014", "0015", "0016",
"0019", "0020", "0021", "0022"
]
ge_data_all = pd.DataFrame()
lambda_data_all = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
@brief test log(time=10s)
"""
import unittest
import pandas
from pyquickhelper.pycode import ExtTestCase
from lightmlrestapi.args.encrypt_helper import encrypt_passwords, load_passwords
class TestEncrypt(ExtTestCase):
def test_encrypt_passwords(self):
users = [('login', 'pwd'), ('login2', 'pwd2')]
enc = encrypt_passwords(users)
self.assertEqual(len(enc), 2)
self.assertEqual(enc[0][0], users[0][0])
self.assertIsInstance(enc[0][1], str)
        df = pandas.DataFrame(users, columns=["aa", "bb"])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functions for Hydra - learning ddGoffset values for free energy perturbations.
"""
# TF-related imports & some settings to reduce TF verbosity:
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1" # current workstation contains 4 GPUs; exclude 1st
import tensorflow as tf
from tensorflow import keras
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
# hyperparameter optimisation:
import skopt
from skopt import gp_minimize, forest_minimize
from skopt.space import Real, Categorical, Integer
from skopt.plots import plot_convergence
from skopt.plots import plot_objective, plot_evaluations
from tensorflow.python.keras import backend as K
from skopt.utils import use_named_args
# featurisation:
from mordred import Calculator, descriptors
from rdkit import Chem
from rdkit.Chem import AllChem, rdmolfiles
# general imports:
import pandas as pd
import numpy as np
import csv
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn import preprocessing, decomposition
from sklearn.model_selection import train_test_split
from scipy import stats
from tqdm import tqdm
import glob
import pickle
# global startpoint for SKOPT optimisation:
startpoint_error = np.inf
###################################################
###################################################
###################### UTILS ######################
###################################################
###################################################
def retrieveMoleculePDB(ligand_path):
"""
Returns RDKit molecule objects for requested path PDB file.
-- args
ligand_path (str): path leading to molecule pdb file
-- returns
RDKit molecule object
"""
mol = rdmolfiles.MolFromPDBFile(
ligand_path,
sanitize=True
)
return mol
def readHDF5Iterable(path_to_trainingset, chunksize):
"""
Read in a training set using pandas' HDF5 utility
--args
path_to_trainingset (str): path to training set (HDF5) to read from
chunksize (int): number of items to read in per increment (recommended 5000 for large datasets)
--returns
training_set (iterable)
"""
training_set = pd.DataFrame()
# use chunksize to save memory during reading:
training_set_iterator = pd.read_hdf(path_to_trainingset, chunksize=chunksize)
return training_set_iterator
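# Usage sketch (hypothetical call site): iterate over a large training set in
# chunks rather than loading it all at once; chunked reading requires the HDF5
# file to have been written in table format.
#
#   for chunk in readHDF5Iterable("features/MOLPROPS/featurised_molprops.h5",
#                                 chunksize=5000):
#       train_on(chunk)  # 'train_on' is a placeholder for downstream code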
###################################################
###################################################
################## FEATURISERS ####################
###################################################
###################################################
###################################################
### Molecular properties: ###
###
###
def computeLigMolProps(
transfrm_path="transformations/",
working_dir="features/MOLPROPS/",
target_columns=None,
verbose=False):
"""
Compute molecular properties for the molecules in given transfrm_path and write to file.
--args
transfrm_path (str): path to directory containing ligand files
working_dir (str): path to directory to pickle into
verbose (bool): whether or not to print featurisation info to stdout
--returns
molprops_set (pandas dataframe): set of molecules with molecular properties
"""
mol_paths = glob.glob(transfrm_path+"*")
# generate RDKit mol objects from paths:
mols_rdkit = [ retrieveMoleculePDB(mol) for mol in mol_paths ]
# generate molecule name from paths for indexing:
mols_names = [ mol.replace(transfrm_path, "").split(".")[0] for mol in mol_paths ]
# generate all descriptors available in mordred:
calc = Calculator(descriptors, ignore_3D=False)
print("Computing molecular properties:")
molprops_set = calc.pandas(mols_rdkit)
# remove columns with bools or strings (not fit for subtraction protocol):
if target_columns is not None:
# if variable is input the function is handling a testset and must
# keep the same columns as train dataset:
molprops_set = molprops_set[target_columns]
else:
# if making a training dataset, decide which columns to retain:
molprops_set = molprops_set.select_dtypes(include=["float64", "int64"])
molprops_set.index = mols_names
# pickle dataframe to specified directory:
molprops_set.to_pickle(working_dir+"molprops.pickle")
if verbose:
print(molprops_set)
return molprops_set
def computePertMolProps(
perturbation_paths,
molprops_set=None,
free_path="SOLVATED/",
working_dir="features/MOLPROPS/"):
"""
Read featurised FEP molecules and generate matches based on user input perturbations.
Writes each perturbation features by appending it to the features.csv file.
--args
perturbation_paths (list): nested list of shape [[A~B],[C~D]] with strings describing
the perturbations. These combinations will be used to make pairwise extractions
from molprops_set.
molprops_set (pandas dataframe; optional): dataframe object that contains the
featurised FEP dataset. If None, will attempt to pickle from working_dir
free_path (str): path to directory containing perturbation directories
working_dir (str): path to directory to pickle dataset from
--returns
None
"""
# test if input is there:
if molprops_set is None:
try:
molprops_set = pd.read_pickle(working_dir+"molprops.pickle")
except FileNotFoundError:
print("Unable to load pickle file with per-ligand molprop data in absence of molprops_set function input.")
# clean slate featurised perturbations dataset; write column names:
open(working_dir+"featurised_molprops.h5", "w").close()
store = pd.HDFStore(working_dir+"featurised_molprops.h5")
# write list of column names to file for future testset feature generation:
    pd.DataFrame(molprops_set.columns)
from numpy import dot, reshape, zeros, identity, ravel, full
from numpy.linalg import inv, LinAlgError
import pandas as pd
from sstspack import DynamicLinearGaussianModel
from sstspack.Utilities import jacobian
from sstspack.DynamicLinearGaussianModelClass import EPSILON
class ExtendedDynamicModel(DynamicLinearGaussianModel):
""""""
expected_columns = ("Z_fn", "H_fn", "T_fn", "R_fn", "Q_fn")
estimation_columns = [
"Z",
"Z_prime",
"H",
"T",
"T_prime",
"R",
"Q",
"a_hat_initial",
"V_initial",
"Z_hat",
"Z_hat_prime",
"H_hat",
"T_hat",
"T_hat_prime",
"R_hat",
"Q_hat",
"a_hat_prior",
"a_hat_posterior",
"P_hat_prior",
"P_hat_posterior",
"v_hat",
"F_hat_inverse",
"K_hat",
"L_hat",
"r_hat",
"N_hat",
"r0_hat",
"r1_hat",
"N0_hat",
"N1_hat",
"N2_hat",
"F1_hat",
"F2_hat",
"L0_hat",
"L1_hat",
"K0_hat",
"K1_hat",
"P_hat_infinity_prior",
"P_hat_infinity_posterior",
"P_hat_star_prior",
"P_hat_star_posterior",
"F_hat_infinity",
"F_hat_star",
"M_hat_infinity",
"M_hat_star",
] + DynamicLinearGaussianModel.estimation_columns
def __init__(
self,
y_series,
model_design_df,
a_prior_initial=None,
P_prior_initial=None,
diffuse_states=None,
validate_input=True,
):
""""""
self.column_redirects = {}
self.initial_smoother_run = False
DynamicLinearGaussianModel.__init__(
self,
y_series,
model_design_df,
a_prior_initial,
P_prior_initial,
diffuse_states,
validate_input,
)
def __getattr__(self, name):
""""""
if name in ["c", "d"]:
return pd.Series([full((1, 1), 0)] * len(self.index), index=self.index)
if name in self.column_redirects:
name = self.column_redirects[name]
return DynamicLinearGaussianModel.__getattr__(self, name)
def _add_column_redirects(self):
""""""
if not self.initial_smoother_run:
self.column_redirects = {
# state terms
"a_hat": "a_hat_initial",
"V": "V_initial",
# model terms
"Z": "Z_prime",
"T": "T_prime",
# estimation terms
"K": "K_hat",
"L": "L_hat",
"r": "r_hat",
"N": "N_hat",
"r_final": "r_hat_final",
"N_final": "N_hat_final",
"r0": "r0_hat",
"r1": "r1_hat",
"N0": "N0_hat",
"N1": "N1_hat",
"N2": "N2_hat",
"r0_final": "r0_hat_final",
"r1_final": "r1_hat_final",
"N0_final": "N0_hat_final",
"N1_final": "N1_hat_final",
"N2_final": "N2_hat_final",
}
else:
self.column_redirects = {
# state terms
"a_prior": "a_hat_prior",
"a_posterior": "a_hat_posterior",
"P_prior": "P_hat_prior",
"P_posterior": "P_hat_posterior",
"P_infinity_prior": "P_hat_infinity_prior",
"P_infinity_posterior": "P_hat_infinity_posterior",
"P_star_prior": "P_hat_star_prior",
"P_star_posterior": "P_hat_star_posterior",
# model terms
"Z": "Z_hat_prime",
"H": "H_hat",
"T": "T_hat_prime",
"R": "R_hat",
"Q": "Q_hat",
# estimation terms
"v": "v_hat",
"F_inverse": "F_hat_inverse",
"F_infinity": "F_hat_infinity",
"F_star": "F_hat_star",
"M_infinity": "M_hat_infinity",
"M_star": "M_hat_star",
"F1": "F1_hat",
"F2": "F2_hat",
"K0": "K0_hat",
"K1": "K1_hat",
"L0": "L0_hat",
"L1": "L1_hat",
}
def _initialise_model_data(self, a_prior_initial):
""""""
self._m = a_prior_initial.shape[0]
for idx in self.index:
self.Z[idx] = self.Z_fn[idx](a_prior_initial)
self.H[idx] = self.H_fn[idx](a_prior_initial)
self.T[idx] = self.T_fn[idx](a_prior_initial)
self.R[idx] = self.R_fn[idx](a_prior_initial)
self.Q[idx] = self.Q_fn[idx](a_prior_initial)
self._add_column_redirects()
def _verification_columns(self, p, idx):
""""""
return {
"Z": (p[idx], 1),
"H": (p[idx], p[idx]),
"T": (self.m, 1),
"R": (self.m, self.r_eta),
"Q": (self.r_eta, self.r_eta),
}
def _prediction_error(self, key):
""""""
if self.initial_smoother_run:
return self.y[key] - self.model_data_df.Z_hat[key]
return self.y[key] - self.model_data_df.Z[key]
def _non_missing_F(self, key):
""""""
self._initialise_data_fn(key)
return DynamicLinearGaussianModel._non_missing_F(self, key)
def _diffuse_filter_posterior_recursion_step(self, key):
""""""
self._initialise_state_fn(key)
return DynamicLinearGaussianModel._diffuse_filter_posterior_recursion_step(
self, key
)
def _filter_posterior_recursion_step(self, key):
""""""
self._initialise_state_fn(key)
return DynamicLinearGaussianModel._filter_posterior_recursion_step(self, key)
def _initialise_data_fn(self, key):
""""""
if not self.initial_smoother_run:
self.model_data_df.Z[key] = self.Z_fn[key](self.a_prior[key])
if "Z_prime_fn" in self.model_data_df.columns:
self.Z_prime[key] = self.Z_prime_fn[key](self.a_prior[key])
else:
self.Z_prime[key] = reshape(
jacobian(self.Z_fn[key], self.a_prior[key], h=1e-10),
(self.p[key], self.m),
)
self.H[key] = self.H_fn[key](self.a_prior[key])
else:
self.model_data_df.Z_hat[key] = self.Z_fn[key](self.a_hat_prior[key])
if "Z_prime_fn" in self.model_data_df.columns:
self.Z_hat_prime[key] = self.Z_prime_fn[key](self.a_hat_prior[key])
else:
self.Z_hat_prime[key] = reshape(
jacobian(self.Z_fn[key], self.a_hat_initial[key], h=1e-10),
(self.p[key], self.m),
)
self.H_hat[key] = self.H_fn[key](self.a_hat_initial[key])
def _initialise_state_fn(self, key):
""""""
if not self.initial_smoother_run:
self.model_data_df["T"][key] = self.T_fn[key](self.a_posterior[key])
if "T_prime_fn" in self.model_data_df.columns:
self.T_prime[key] = self.T_prime_fn[key](self.a_posterior[key])
else:
self.T_prime[key] = reshape(
jacobian(self.T_fn[key], self.a_posterior[key], h=1e-10),
(self.m, self.m),
)
self.R[key] = self.R_fn[key](self.a_posterior[key])
self.Q[key] = self.Q_fn[key](self.a_posterior[key])
else:
self.T_hat[key] = self.T_fn[key](self.a_hat_initial[key])
if "T_prime_fn" in self.model_data_df.columns:
self.T_hat_prime[key] = self.T_prime_fn[key](self.a_hat_initial[key])
else:
self.T_hat_prime[key] = reshape(
jacobian(self.T_fn[key], self.a_hat_initial[key], h=1e-10),
(self.m, self.m),
)
self.R_hat[key] = self.R_fn[key](self.a_hat_initial[key])
self.Q_hat[key] = self.Q_fn[key](self.a_hat_initial[key])
def aggregate_field(self, field, mask=None):
""""""
data = []
for idx in self.index:
Z = mask
if Z is None:
Z = self.Z_prime[idx]
value = dot(Z, self.model_data_df.loc[idx, field])
if value.shape == (1, 1):
value = value[0, 0]
data.append(value)
        return pd.Series(data, index=self.index)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 19:28:36 2019
@author: github.com/sahandv
"""
import sys
import gc
from tqdm import tqdm
import pandas as pd
import numpy as np
import re
from sciosci.assets import text_assets as kw
from sciosci.assets import keyword_dictionaries as kd
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
tqdm.pandas()
nltk.download('wordnet')
nltk.download('punkt')
# =============================================================================
# Read data and Initialize
# =============================================================================
year_from = 0
year_to = 2021
MAKE_SENTENCE_CORPUS = False
MAKE_SENTENCE_CORPUS_ADVANCED_KW = False
MAKE_SENTENCE_CORPUS_ADVANCED = False
MAKE_REGULAR_CORPUS = True
GET_WORD_FREQ_IN_SENTENCE = False
PROCESS_KEYWORDS = False
stops = ['a','an','we','result','however','yet','since','previously','although','propose','proposed','this','...']
nltk.download('stopwords')
stop_words = list(set(stopwords.words("english")))+stops
data_path_rel = '/mnt/16A4A9BCA4A99EAD/GoogleDrive/Data/Corpus/KPRIS/kpris_data.csv'
# data_path_rel = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/Corpus/AI 4k/scopus_4k.csv'
# data_path_rel = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/AI ALL 1900-2019 - reformat'
# data_path_rel = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/Corpus/AI 300/merged - scopus_v2_relevant wos_v1_relevant - duplicate doi removed - abstract corrected - 05 Aug 2019.csv'
data_full_relevant = pd.read_csv(data_path_rel)
# data_full_relevant = data_full_relevant[['dc:title','authkeywords','abstract','year']]
# data_full_relevant.columns = ['TI','DE','AB','PY']
sample = data_full_relevant.sample(4)
root_dir = '/mnt/16A4A9BCA4A99EAD/GoogleDrive/Data/Corpus/KPRIS/'
subdir = 'clean/' # no_lemmatization_no_stopwords
gc.collect()
data_full_relevant['PY'] = 2018
data_full_relevant['AB'] = data_full_relevant['abstract']
data_full_relevant['TI'] = ''
data_full_relevant['DE'] = np.nan
data_full_relevant['ID'] = ''
data_full_relevant['SO'] = ''
#
data_wrong = data_full_relevant[data_full_relevant['AB'].str.contains("abstract available")].index
data_wrong = list(data_wrong)
data_full_relevant = data_full_relevant.drop(data_wrong,axis=0)
# =============================================================================
# Initial Pre-Processing :
# Following tags requires WoS format. Change them otherwise.
# =============================================================================
data_filtered = data_full_relevant.copy()
data_filtered = data_filtered[pd.notnull(data_filtered['PY'])]
data_filtered = data_filtered[data_filtered['PY'].astype('int')>year_from-1]
data_filtered = data_filtered[data_filtered['PY'].astype('int')<year_to]
# Remove columns without keywords/abstract list
data_with_keywords = data_filtered[pd.notnull(data_filtered['DE'])]
data_with_abstract = data_filtered[pd.notnull(data_filtered['AB'])]
# Remove special chars and strings from abstracts
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_c(x) if pd.notnull(x) else np.nan).str.lower()
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'et al.') if pd.notnull(x) else np.nan)
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'eg.') if pd.notnull(x) else np.nan)
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'ie.') if pd.notnull(x) else np.nan)
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'vs.') if pd.notnull(x) else np.nan)
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'ieee') if pd.notnull(x) else np.nan)
data_with_abstract['AB'] = data_with_abstract['AB'].progress_apply(lambda x: kw.find_and_remove_term(x,'fig.','figure') if pd.notnull(x) else np.nan)
# Remove numbers from abstracts to eliminate decimal points and other unnecessary data
# gc.collect()
abstracts = []
for abstract in tqdm(data_with_abstract['AB'].values.tolist()):
numbers = re.findall(r"[-+]?\d*\.\d+|\d+", abstract)
for number in numbers:
abstract = kw.find_and_remove_term(abstract,number)
abstracts.append(abstract)
data_with_abstract['AB'] = abstracts.copy()
del abstracts
source_list = pd.DataFrame(data_with_abstract['SO'].values.tolist(),columns=['source'])
source_list.to_csv(root_dir+subdir+str(year_from)+'-'+str(year_to-1)+' corpus sources',index=False) # Save year indices to disk for further use
year_list = pd.DataFrame(data_with_abstract['PY'].values.tolist(),columns=['year'])
year_list.to_csv(root_dir+subdir+str(year_from)+'-'+str(year_to-1)+' corpus years',index=False) # Save year indices to disk for further use
gc.collect()
# =============================================================================
# Sentence maker
# =============================================================================
if MAKE_SENTENCE_CORPUS is True:
thesaurus = pd.read_csv('data/thesaurus/thesaurus_for_ai_keyword_with_() (training).csv')
thesaurus = thesaurus.fillna('')
print("\nSentence maker and thesaurus matching. \nThis will take some time...")
data_with_abstract['AB_no_c'] = data_with_abstract['AB'].apply(lambda x: kw.find_and_remove_c(x) if pd.notnull(x) else np.nan)
sentence_corpus = []
for index,row in tqdm(data_with_abstract.iterrows(),total=data_with_abstract.shape[0]):
words = re.split('( |\\n|\.|\?|!|:|;|,|_|\[|\])',row['AB_no_c'].lower())
new_words = []
year = row['PY']
flag_word_removed = False
for w_idx,word in enumerate(words):
if flag_word_removed is True:
if word==' ':
flag_word_removed = False
continue
if word in thesaurus['alt'].values.tolist():
word_old = word
buffer_word = word
word = thesaurus[thesaurus['alt']==word]['original'].values.tolist()[0]
# print("changed '",word_old,"' to '",word,"'.")
new_words.append(word)
row = ''.join(new_words)
sentences = re.split('(\. |\? |\\n)',row)
sentences = [i+j for i,j in zip(sentences[0::2], sentences[1::2])]
for sentence_n in sentences:
sentence_corpus.append([index,sentence_n,year])
sentence_corpus = pd.DataFrame(sentence_corpus,columns=['article_index','sentence','year'])
sentence_corpus.to_csv(root_dir+subdir+str(year_from)+'-'+str(year_to-1)+' corpus sentences abstract-title',index=False,header=True)
gc.collect()
# =============================================================================
# Sentence maker -- Advanced --
# =============================================================================
if MAKE_SENTENCE_CORPUS_ADVANCED is True:
data_with_abstract['TI_AB'] = data_with_abstract.TI.map(str) + ". " + data_with_abstract.AB
data_fresh = data_with_abstract[['TI_AB','PY']].copy()
data_fresh['TI_AB'] = data_fresh['TI_AB'].str.lower()
del data_with_abstract
gc.collect()
data_tmp = data_fresh[1:10]
data_fresh[-2:-1]
print("\nSentence extraction")
sentences = []
years = []
indices = []
for index,row in tqdm(data_fresh.iterrows(),total=data_fresh.shape[0]):
abstract_str = row['TI_AB']
year = row['PY']
abstract_sentences = re.split('\. |\? |\\n',abstract_str)
length = len(abstract_sentences)
sentences.extend(abstract_sentences)
years.extend([year for x in range(length)])
indices.extend([index for x in range(length)])
print("\nTokenizing")
tmp = []
for sentence in tqdm(sentences):
tmp.append(word_tokenize(sentence))
sentences = tmp.copy()
del tmp
print("\nString pre processing for abstracts: lower and strip")
sentences = [list(map(str.lower, x)) for x in sentences]
sentences = [list(map(str.strip, x)) for x in sentences]
tmp = []
print("\nString pre processing for abstracts: lemmatize and stop word removal")
for string_list in tqdm(sentences, total=len(sentences)):
tmp_list = [kw.string_pre_processing(x,stemming_method='None',lemmatization='DEF',stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in string_list]
tmp.append(tmp_list)
sentences = tmp.copy()
del tmp
gc.collect()
tmp = []
print("\nString pre processing for abstracts: null word removal")
for string_list in tqdm(sentences, total=len(sentences)):
tmp.append([x for x in string_list if x!=''])
sentences = tmp.copy()
del tmp
print("\nThesaurus matching")
sentences = kw.thesaurus_matching(sentences,thesaurus_file='data/thesaurus/thesaurus_for_ai_keyword_with_() (training).csv')
    print("\nStitching tokens")
tmp = []
for words in tqdm(sentences, total=len(sentences)):
tmp.append(' '.join(words))
sentences = tmp.copy()
del tmp
print("\nGB to US")
tmp = []
for sentence in tqdm(sentences, total=len(sentences)):
tmp.append(kw.replace_british_american(sentence,kd.gb2us))
sentences = tmp.copy()
del tmp
sentence_df = pd.DataFrame(indices,columns=['article_index'])
sentence_df['sentence'] = sentences
sentence_df['year'] = years
sentence_df.to_csv(root_dir+subdir+str(year_from)+'-'+str(year_to-1)+' corpus sentences abstract-title',index=False,header=True)
# =============================================================================
# Keyword Extractor
# =============================================================================
if MAKE_SENTENCE_CORPUS_ADVANCED_KW is True:
data_with_abstract['TI_AB'] = data_with_abstract.AB
data_fresh = data_with_abstract[['TI_AB','PY']].copy()
data_fresh['TI_AB'] = data_fresh['TI_AB'].str.lower()
del data_with_abstract
gc.collect()
data_tmp = data_fresh[1:10]
data_fresh[-2:-1]
print("\nSentence extraction")
sentences = []
years = []
indices = []
for index,row in tqdm(data_fresh.iterrows(),total=data_fresh.shape[0]):
abstract_str = row['TI_AB']
year = row['PY']
abstract_sentences = re.split('\\n',abstract_str)
length = len(abstract_sentences)
sentences.extend(abstract_sentences)
years.extend([year for x in range(length)])
indices.extend([index for x in range(length)])
print("\nTokenizing")
tmp = []
for sentence in tqdm(sentences):
tmp.append(word_tokenize(sentence))
sentences = tmp.copy()
del tmp
print("\nString pre processing for abstracts: lower and strip")
sentences = [list(map(str.lower, x)) for x in sentences]
sentences = [list(map(str.strip, x)) for x in sentences]
tmp = []
print("\nString pre processing for abstracts: lemmatize and stop word removal")
for string_list in tqdm(sentences, total=len(sentences)):
tmp_list = [kw.string_pre_processing(x,stemming_method='None',lemmatization='DEF',stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in string_list]
tmp.append(tmp_list)
sentences = tmp.copy()
del tmp
gc.collect()
tmp = []
print("\nString pre processing ")
for string_list in tqdm(sentences, total=len(sentences)):
string_tmp = []
for token in string_list:
if token == '':
string_tmp.append(' | ')
else:
string_tmp.append(token)
tmp.append(string_tmp)
sentences = tmp.copy()
del tmp
tmp = []
print("\nString pre processing for abstracts: null word removal")
for string_list in tqdm(sentences, total=len(sentences)):
tmp.append([x for x in string_list if x!=''])
sentences = tmp.copy()
del tmp
print("\nThesaurus matching")
sentences = kw.thesaurus_matching(sentences,thesaurus_file='data/thesaurus/thesaurus_for_ai_keyword_with_() (testing).csv')
    print("\nStitching tokens")
tmp = []
for words in tqdm(sentences, total=len(sentences)):
tmp.append(' '.join(words))
sentences = tmp.copy()
del tmp
print("\nGB to US")
tmp = []
for sentence in tqdm(sentences, total=len(sentences)):
tmp.append(kw.replace_british_american(sentence,kd.gb2us))
sentences = tmp.copy()
del tmp
sentence_df = pd.DataFrame(indices,columns=['article_index'])
sentence_df['sentence'] = sentences
sentence_df['year'] = years
sentence_df.to_csv(root_dir+subdir+str(year_from)+'-'+str(year_to-1)+' corpus sentences abstract-title',index=False,header=True)
if MAKE_REGULAR_CORPUS is False:
sys.exit('Did not continue to create normal corpus. If you want a corpus, set it to True at init section.')
# =============================================================================
# Get word frequency in sentence corpus -- OPTIONAL
# =============================================================================
if GET_WORD_FREQ_IN_SENTENCE is True:
import pandas as pd
import numpy as np
from tqdm import tqdm
file = root_dir+subdir+str(year_from)+'-'+str(year_to-1)+' corpus abstract-title'#'/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/corpus/AI ALL/1900-2019 corpus sentences abstract-title'
file = pd.read_csv(file)
size = 500000
unique = []
for data_start_point in tqdm(np.arange(0,file.shape[0],size)):
if data_start_point+size<file.shape[0]:
end_point = data_start_point+size
else:
end_point = file.shape[0]-1
# print(data_start_point,end_point)
str_split = list(file.sentence[data_start_point:end_point].str.split())
str_flat = pd.DataFrame([item for sublist in str_split for item in sublist])
str_flat.columns = ['words']
str_flat.head()
unique = unique+list(str_flat.words.unique())
unique = pd.DataFrame(unique)
unique.columns = ['words']
unique = list(unique.words.unique())
len(unique)
# =============================================================================
# Tokenize (Author Keywords and Abstracts+Titles)
# =============================================================================
abstracts = []
keywords = []
keywords_index = []
abstracts_pure = []
data_with_abstract['ID'] = ''
data_with_abstract['DE'] = ''
data_with_abstract['TI'] = ''
for index,paper in tqdm(data_with_abstract.iterrows(),total=data_with_abstract.shape[0]):
keywords_str = paper['DE']
keywords_index_str = paper['ID']
abstract_str = paper['AB']
title_str = paper['TI']
abstract_dic = word_tokenize(title_str+' '+abstract_str)
abstract_dic_pure = abstract_dic.copy()
if pd.notnull(paper['DE']):
keywords_dic = word_tokenize(keywords_str)
keywords.append(keywords_str.split(';'))
abstract_dic.extend(keywords_dic)
else:
keywords.append([])
if pd.notnull(paper['ID']):
keywords_index.append(keywords_index_str.split(';'))
else:
keywords_index.append([])
abstracts.append(abstract_dic)
abstracts_pure.append(abstract_dic_pure)
# Add to main df. Not necessary
data_with_abstract['AB_split'] = abstracts_pure
data_with_abstract['AB_KW_split'] = abstracts
# =============================================================================
# Strip and lowe case
# =============================================================================
abstracts_pure = [list(map(str.strip, x)) for x in abstracts_pure]
abstracts_pure = [list(map(str.lower, x)) for x in abstracts_pure]
abstracts = [list(map(str.strip, x)) for x in abstracts]
abstracts = [list(map(str.lower, x)) for x in abstracts]
keywords = [list(map(str.strip, x)) for x in keywords]
keywords = [list(map(str.lower, x)) for x in keywords]
keywords_index = [list(map(str.strip, x)) for x in keywords_index]
keywords_index = [list(map(str.lower, x)) for x in keywords_index]
# =============================================================================
# Pre Process
# =============================================================================
tmp_data = []
print("\nString pre processing for ababstracts_purestracts")
for string_list in tqdm(abstracts, total=len(abstracts)):
tmp_list = [kw.string_pre_processing(x,stemming_method='None',lemmatization='DEF',stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in string_list]
tmp_data.append(tmp_list)
abstracts = tmp_data.copy()
del tmp_data
tmp_data = []
for string_list in tqdm(abstracts_pure, total=len(abstracts_pure)):
tmp_list = [kw.string_pre_processing(x,stemming_method='None',lemmatization=False,stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in string_list]
tmp_data.append(tmp_list)
abstracts_pure = tmp_data.copy()
del tmp_data
if PROCESS_KEYWORDS is True:
print("\nString pre processing for keywords")
tmp_data = []
for string_list in tqdm(keywords, total=len(keywords)):
tmp_list = []
for string in string_list:
tmp_sub_list = string.split()
tmp_list.append(' '.join([kw.string_pre_processing(x,stemming_method='None',lemmatization=False,stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in tmp_sub_list]))
tmp_data.append(tmp_list)
keywords = tmp_data.copy()
del tmp_data
tmp_data = []
for string_list in tqdm(keywords_index, total=len(keywords_index)):
tmp_list = []
for string in string_list:
tmp_sub_list = string.split()
tmp_list.append(' '.join([kw.string_pre_processing(x,stemming_method='None',lemmatization=False,stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in tmp_sub_list]))
tmp_data.append(tmp_list)
keywords_index = tmp_data.copy()
del tmp_data
#tmp_data = []
#for string_list in tqdm(keywords, total=len(keywords)):
# tmp_list = []
# for sub_string_list in string_list:
# tmp_list.append(' '.join(sub_string_list))
# tmp_data.append(tmp_list)
#keywords = tmp_data.copy()
#del tmp_data
# =============================================================================
# Clean-up dead words
# =============================================================================
tmp_data = []
for string_list in tqdm(abstracts, total=len(abstracts)):
tmp_data.append([x for x in string_list if x!=''])
abstracts = tmp_data.copy()
del tmp_data
tmp_data = []
for string_list in tqdm(abstracts_pure, total=len(abstracts_pure)):
tmp_data.append([x for x in string_list if x!=''])
abstracts_pure = tmp_data.copy()
del tmp_data
tmp_data = []
for string_list in tqdm(keywords, total=len(keywords)):
tmp_data.append([x for x in string_list if x!=''])
keywords = tmp_data.copy()
del tmp_data
tmp_data = []
for string_list in tqdm(keywords_index, total=len(keywords_index)):
tmp_data.append([x for x in string_list if x!=''])
keywords_index = tmp_data.copy()
del tmp_data
# =============================================================================
# Break-down abstracts again
# =============================================================================
tmp_data = []
for abstract in tqdm(abstracts):
words = []
for word in abstract:
words = words+word.split()
tmp_data.append(words)
abstracts = tmp_data.copy()
del tmp_data
tmp_data = []
for abstract in tqdm(abstracts_pure):
words = []
for word in abstract:
words = words+word.split()
tmp_data.append(words)
abstracts_pure = tmp_data.copy()
del tmp_data
# =============================================================================
# Thesaurus matching
# =============================================================================
print("\nThesaurus matching")
abstracts_backup = abstracts.copy()
abstracts_pure_backup = abstracts_pure.copy()
keywords_backup = keywords.copy()
keywords_index_backup = keywords_index.copy()
abstracts = abstracts_backup.copy()
abstracts_pure = abstracts_pure_backup.copy()
keywords = keywords_backup.copy()
keywords_index = keywords_index_backup.copy()
abstracts = kw.thesaurus_matching(abstracts,thesaurus_file='data/thesaurus/thesaurus_for_ai_keyword_with_() (testing).csv')
abstracts_pure = kw.thesaurus_matching(abstracts_pure,thesaurus_file='data/thesaurus/thesaurus_for_ai_keyword_with_() (testing).csv')
if PROCESS_KEYWORDS is True:
keywords = kw.thesaurus_matching(keywords)
keywords_index = kw.thesaurus_matching(keywords_index)
# =============================================================================
# Term to string corpus for co-word analysis
# =============================================================================
print("\nTerm to string corpus")
corpus_abstract = []
for words in tqdm(abstracts, total=len(abstracts)):
corpus_abstract.append(' '.join(words))
corpus_abstract_pure = []
for words in tqdm(abstracts_pure, total=len(abstracts_pure)):
corpus_abstract_pure.append(' '.join(words))
corpus_keywords = []
for words in tqdm(keywords, total=len(keywords)):
corpus_keywords.append(';'.join(words))
corpus_keywords_index = []
for words in tqdm(keywords_index, total=len(keywords_index)):
corpus_keywords_index.append(';'.join(words))
# =============================================================================
# Remove substrings :
# be careful with this one! It might remove parts of a string or half of a word
# =============================================================================
thesaurus = pd.read_csv('data/thesaurus/to_remove.csv')
thesaurus['alt'] = ''
thesaurus = thesaurus.values.tolist()
print("\nRemoving substrings")
corpus_abstract_tr = []
for paragraph in tqdm(corpus_abstract, total=len(corpus_abstract)):
paragraph = kw.filter_string(paragraph,thesaurus)
corpus_abstract_tr.append(paragraph)
corpus_abstract_pure_tr = []
for paragraph in tqdm(corpus_abstract_pure, total=len(corpus_abstract_pure)):
paragraph = kw.filter_string(paragraph,thesaurus)
corpus_abstract_pure_tr.append(paragraph)
corpus_keywords_tr = []
for paragraph in tqdm(corpus_keywords, total=len(corpus_keywords)):
paragraph = kw.filter_string(paragraph,thesaurus)
corpus_keywords_tr.append(paragraph)
corpus_keywords_index_tr = []
for paragraph in tqdm(corpus_keywords_index, total=len(corpus_keywords_index)):
paragraph = kw.filter_string(paragraph,thesaurus)
corpus_keywords_index_tr.append(paragraph)
# =============================================================================
# Final clean-up (double space and leading space)
# =============================================================================
tmp_data = []
for paragraph in tqdm(corpus_abstract, total=len(corpus_abstract)):
paragraph = ' '.join(paragraph.split())
tmp_data.append(paragraph)
corpus_abstract = tmp_data.copy()
del tmp_data
tmp_data = []
for paragraph in tqdm(corpus_abstract_tr, total=len(corpus_abstract_tr)):
paragraph = ' '.join(paragraph.split())
tmp_data.append(paragraph)
corpus_abstract_tr = tmp_data.copy()
del tmp_data
tmp_data = []
for paragraph in tqdm(corpus_abstract_pure, total=len(corpus_abstract_pure)):
paragraph = ' '.join(paragraph.split())
tmp_data.append(paragraph)
corpus_abstract_pure = tmp_data.copy()
del tmp_data
tmp_data = []
for paragraph in tqdm(corpus_abstract_pure_tr, total=len(corpus_abstract_pure_tr)):
paragraph = ' '.join(paragraph.split())
tmp_data.append(paragraph)
corpus_abstract_pure_tr = tmp_data.copy()
del tmp_data
tmp_data = []
for paragraph in tqdm(corpus_keywords, total=len(corpus_keywords)):
paragraph = ' '.join(paragraph.split(' '))
paragraph = ';'.join(paragraph.split(';'))
tmp_data.append(paragraph)
corpus_keywords = tmp_data.copy()
del tmp_data
tmp_data = []
for paragraph in tqdm(corpus_keywords_tr, total=len(corpus_keywords_tr)):
paragraph = ' '.join(paragraph.split(' '))
paragraph = ';'.join(paragraph.split(';'))
tmp_data.append(paragraph)
corpus_keywords_tr = tmp_data.copy()
del tmp_data
tmp_data = []
for paragraph in tqdm(corpus_keywords_index, total=len(corpus_keywords_index)):
paragraph = ' '.join(paragraph.split(' '))
paragraph = ';'.join(paragraph.split(';'))
tmp_data.append(paragraph)
corpus_keywords_index = tmp_data.copy()
del tmp_data
tmp_data = []
for paragraph in tqdm(corpus_keywords_index_tr, total=len(corpus_keywords_index_tr)):
paragraph = ' '.join(paragraph.split(' '))
paragraph = ';'.join(paragraph.split(';'))
tmp_data.append(paragraph)
corpus_keywords_index_tr = tmp_data.copy()
del tmp_data
# =============================================================================
# Write to disk
# =============================================================================
corpus_abstract = pd.DataFrame(corpus_abstract,columns=['words'])
corpus_abstract_tr = pd.DataFrame(corpus_abstract_tr,columns=['words'])
corpus_abstract_pure =
|
pd.DataFrame(corpus_abstract_pure,columns=['words'])
|
pandas.DataFrame
|
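An aside on the word-frequency block in the script above: the chunked loop that accumulates unique tokens can be written with plain pandas. A minimal sketch, assuming a 'sentence' column of space-separated tokens; the toy corpus here is illustrative.

import pandas as pd

# toy stand-in for the saved corpus file and its 'sentence' column
corpus = pd.DataFrame({'sentence': ['deep learning model', 'learning to rank model']})

# split each sentence, flatten the token lists, and count occurrences
token_counts = corpus['sentence'].str.split().explode().value_counts()
unique_tokens = token_counts.index.tolist()

print(len(unique_tokens))   # number of distinct tokens
print(token_counts.head())  # most frequent tokens first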
import json
import pandas as pd
import random
import os
import pyproj
import numpy as np
import geopandas as gpd
from pathlib import Path
from datetime import datetime
from copy import deepcopy
from shapely.geometry import Point
from shapely.ops import transform
from sklearn.preprocessing import OneHotEncoder
# load config file
with open(Path(os.path.dirname(os.path.realpath(__file__)), '../config.json')) as f:
config = json.load(f)
class DataLoader:
"""
Loads the combined HVP dataset containing POI data and URA land use data and performs data preparation.
"""
def __init__(self):
"""
Initialises the class object by loading the combined HVP dataset containing POI data and URA land use data.
"""
print('Loading batch data...')
        batches = [
            pd.read_excel(os.path.join(os.path.dirname(__file__),
                                       config['processed_data_directory'] +
                                       'batch_stop_data_{}.xlsx'.format(i)))
            for i in range(1, 9)
        ]
        self.data = pd.concat(batches, ignore_index=True)
def check_stop_order(self, data):
"""
Checks if the stops made by each driver is in chronological order.
Parameters:
data: pd.Dataframe
Contains the combined HVP dataset.
"""
for driver_id in data['DriverID'].unique():
driver_data = deepcopy(data[data['DriverID'] == driver_id].reset_index(drop=True))
unix_time = np.array([datetime.strptime(time_str, '%Y-%m-%d %H-%M-%S').timestamp()
for time_str in driver_data['StartTime'].tolist()])
time_diff = unix_time[1:] - unix_time[:-1]
if len(driver_data) > 1:
                assert not np.any(time_diff < 0.0)
def _buffer_in_meters(self, lng, lat, radius):
"""
        Converts a latitude, longitude coordinate pair into a buffer with a user-defined radius.
:param lng: float
Contains the longitude information.
:param lat: float
Contains the latitude information.
:param radius: float
Contains the buffer radius in metres.
:return:
buffer_latlng: Polygon
Contains the buffer.
"""
proj_meters = pyproj.CRS('EPSG:3414') # EPSG for Singapore
proj_latlng = pyproj.CRS('EPSG:4326')
project_to_metres = pyproj.Transformer.from_crs(proj_latlng, proj_meters, always_xy=True).transform
project_to_latlng = pyproj.Transformer.from_crs(proj_meters, proj_latlng, always_xy=True).transform
pt_meters = transform(project_to_metres, Point(lng, lat))
buffer_meters = pt_meters.buffer(radius)
buffer_latlng = transform(project_to_latlng, buffer_meters)
return buffer_latlng
def _extract_other_driver_activities(self, driver_data, other_driver_data):
"""
Extracts the activity information performed by other drivers in the same area.
Parameters:
driver_data: pd.Dataframe
Contains the combined HVP dataset for a particular driver.
other_driver_data: pd.Dataframe
Contains the combined HVP dataset for the other drivers.
Return:
driver: pd.Dataframe
Contains the combined HVP dataset for a particular driver + past activities of other drivers
"""
other_driver_activities = pd.DataFrame()
driver_data = gpd.GeoDataFrame(driver_data,
geometry=gpd.points_from_xy(driver_data['StopLon'],
driver_data['StopLat']))
other_driver_data = gpd.GeoDataFrame(other_driver_data,
geometry=gpd.points_from_xy(other_driver_data['StopLon'],
other_driver_data['StopLat']))
for i in range(len(driver_data)):
            # create a 50m circular buffer around the stop
buffer = self._buffer_in_meters(driver_data.loc[i, 'StopLon'],
driver_data.loc[i, 'StopLat'], 50.0)
nearby_stops = other_driver_data[other_driver_data.intersects(buffer)].reset_index(drop=True)
if len(nearby_stops) == 0:
other_driver_activities = other_driver_activities.append(pd.Series(dtype=object), ignore_index=True)
else:
activity_cols = [col for col in nearby_stops.columns
if ('Activity.' in col)
and ('MappedActivity.' not in col)
and ('Other.' not in col)]
mapped_activity_cols = [col for col in nearby_stops.columns
if ('MappedActivity.' in col) and ('Other.' not in col)]
# calculate distribution of activities conducted near the stop
summed_activity = nearby_stops.sum()[activity_cols]
normalised_activity = (summed_activity) / (summed_activity.sum() + 1e-9)
# calculate distribution of mapped activities conducted near the stop
summed_mapped_activity = nearby_stops.sum()[mapped_activity_cols]
normalised_mapped_activity = (summed_mapped_activity) / (summed_mapped_activity.sum() + 1e-9)
# merge original and mapped activity types conducted by other drivers
other_driver_activities = other_driver_activities.append(pd.concat([normalised_activity,
normalised_mapped_activity]).T,
ignore_index=True)
assert len(driver_data) == len(other_driver_activities)
other_driver_activities_cols = ['Other.{}'.format(column) for column in other_driver_activities.columns]
other_driver_activities.columns = other_driver_activities_cols
driver_data = pd.concat([driver_data, other_driver_activities], axis=1)
driver_data.fillna(0, inplace=True)
return driver_data
def _extract_past_activities(self, data):
"""
Extracts past activities performed by each driver.
Parameters:
data: pd.Dataframe
Contains the combined HVP dataset.
Return:
new_data: pd.DataFrame
Contains the combined HVP dataset with past activities performed by each driver
"""
assert type(data) == gpd.GeoDataFrame
new_data = pd.DataFrame()
# extract unix time of each stop
data['StopUnixTime'] = [datetime.strptime(time_str, '%Y-%m-%d %H-%M-%S').timestamp()
for time_str in data['StartTime'].tolist()]
for driver_id in data['DriverID'].unique():
driver_data = deepcopy(data[data['DriverID'] == driver_id].reset_index(drop=True))
past_activities = pd.DataFrame()
for i in range(len(driver_data)):
                # create a 50m circular buffer around the stop
buffer = self._buffer_in_meters(driver_data.loc[i, 'StopLon'],
driver_data.loc[i, 'StopLat'], 50.0)
nearby_stops = driver_data[driver_data.intersects(buffer)].reset_index(drop=True)
nearby_stops = nearby_stops[nearby_stops['StopUnixTime'] <
driver_data.loc[i, 'StopUnixTime']].reset_index(drop=True)
if len(nearby_stops) == 0:
past_activities = past_activities.append(pd.Series({'Activity.Shift': 0}), ignore_index=True)
else:
activity_cols = [col for col in nearby_stops.columns
if ('Activity.' in col) and
('MappedActivity.' not in col) and
('Other.' not in col)]
mapped_activity_cols = [col for col in nearby_stops.columns
if ('MappedActivity.' in col) and
('Other.' not in col)]
# calculate distribution of activities conducted near the stop
summed_activity = nearby_stops.sum()[activity_cols]
normalised_activity = (summed_activity) / (summed_activity.sum() + 1e-9)
# calculate distribution of mapped activities conducted near the stop
summed_mapped_activity = nearby_stops.sum()[mapped_activity_cols]
normalised_mapped_activity = (summed_mapped_activity) / (summed_mapped_activity.sum() + 1e-9)
past_activities = past_activities.append(pd.concat([normalised_activity,
normalised_mapped_activity]).T,
ignore_index=True)
assert len(driver_data) == len(past_activities)
past_activities_cols = ['Past.{}'.format(column) for column in past_activities.columns]
past_activities.columns = past_activities_cols
driver_data = pd.concat([driver_data, past_activities], axis=1)
driver_data.fillna(0, inplace=True)
new_data = pd.concat([new_data, driver_data], ignore_index=True)
new_data.fillna(0, inplace=True)
return new_data
def _one_hot_encoding(self, train_col, test_col, feature_name):
"""
Performs one hot encoding of a particular column for both training and test datasets.
Parameters:
train_col: pd.Series
Contains the column to be one-hot-encoded from the training dataset.
test_col: pd.Series
Contains the column to be one-hot-encoded from the test dataset.
feature_name: str
Contains the name of the feature to be one-hot-encoded.
Return:
train_onehot_df: pd.Dataframe
Contains the one-hot-encoded dataframe of the column in the training dataset.
test_onehot_df: pd.Dataframe
Contains the one-hot-encoded dataframe of the column in the test dataset.
"""
encoder = OneHotEncoder(sparse=False)
encoder.fit(np.array(pd.concat([train_col, test_col], ignore_index=True)).reshape(-1, 1))
train_onehot_df = pd.DataFrame(encoder.transform(np.array(train_col).reshape(-1, 1)),
columns=['{}.{}'.format(feature_name, cat.replace('X_', ''))
for cat in encoder.get_feature_names(['X'])])
test_onehot_df = pd.DataFrame(encoder.transform(np.array(test_col).reshape(-1, 1)),
columns=['{}.{}'.format(feature_name, cat.replace('X_', ''))
for cat in encoder.get_feature_names(['X'])])
return train_onehot_df, test_onehot_df
def _extract_last_activity(self, data):
"""
Extracts last activity information.
Parameters:
data: pd.DataFrame
Contains the verified stops information.
Return:
data: pd.DataFrame
Contains the verified stops information with last activity information.
"""
activity_cols = [col for col in list(data.columns) if "MappedActivity." in col]
activity_array = data[activity_cols].values
last_activity_array = np.zeros(activity_array.shape)
last_activity_array[1:, :] = activity_array[:-1, :]
last_activity_df = pd.DataFrame(last_activity_array,
columns=[col.replace("MappedActivity", "LastActivity")
for col in activity_cols])
data = pd.concat([data, last_activity_df], axis=1)
return data
def train_test_split(self, test_ratio=0.25):
"""
Performs train test split on the combined HVP dataset and performs feature extraction.
Parameters:
test_ratio: float
Contains the ratio for the test dataset.
Return:
train_data: pd.Dataframe
Contains the training dataset after feature extraction.
test_data: pd.Dataframe
Contains the test dataset after feature extraction.
"""
# check local directory and load cache if available
print('Performing train test split...')
if (os.path.exists(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] + 'train_data.xlsx'))) and \
(os.path.exists(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] + 'test_data.xlsx'))):
train_data = pd.read_excel(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] + 'train_data.xlsx'))
test_data = pd.read_excel(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] + 'test_data.xlsx'))
return train_data, test_data
# extract last activity information
print("Extract last activity information...")
self.data = self._extract_last_activity(self.data)
# perform train test split
driver_id = self.data['DriverID'].unique()
random.shuffle(driver_id)
test_id = driver_id[:int(len(driver_id) * test_ratio)]
train_id = driver_id[int(len(driver_id) * test_ratio):]
train_data = self.data[self.data['DriverID'].isin(train_id)].reset_index(drop=True)
test_data = self.data[self.data['DriverID'].isin(test_id)].reset_index(drop=True)
# perform one hot encoding
print('Performing one hot encoding...')
train_vehtype, test_vehtype = self._one_hot_encoding(train_data['VehicleType'],
test_data['VehicleType'], 'VehicleType')
train_dayofweek, test_dayofweek = self._one_hot_encoding(train_data['DayOfWeekStr'],
test_data['DayOfWeekStr'], 'DayOfWeek')
train_landuse, test_landuse = self._one_hot_encoding(train_data['MappedLandUseType'],
test_data['MappedLandUseType'], 'LandUse')
assert len(train_vehtype) == len(train_data)
assert len(train_dayofweek) == len(train_data)
assert len(train_landuse) == len(train_data)
assert len(test_vehtype) == len(test_data)
assert len(test_dayofweek) == len(test_data)
assert len(test_landuse) == len(test_data)
train_data =
|
pd.concat([train_data, train_vehtype, train_dayofweek, train_landuse], axis=1)
|
pandas.concat
|
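A standalone sketch of the buffer-in-metres idea behind _buffer_in_meters above, assuming pyproj 2+ and shapely are installed; the coordinates are illustrative.

import pyproj
from shapely.geometry import Point
from shapely.ops import transform

# EPSG:3414 (SVY21, metres) and EPSG:4326 (WGS84, degrees), matching the class above
to_metres = pyproj.Transformer.from_crs('EPSG:4326', 'EPSG:3414', always_xy=True).transform
to_latlng = pyproj.Transformer.from_crs('EPSG:3414', 'EPSG:4326', always_xy=True).transform

lng, lat = 103.82, 1.35                                       # illustrative point in Singapore
buffer_m = transform(to_metres, Point(lng, lat)).buffer(50.0) # 50 m circular buffer in metres
buffer_latlng = transform(to_latlng, buffer_m)                # back to lat/lng for intersects() checks

print(buffer_latlng.bounds)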
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from evalml.pipelines.components import LabelEncoder
def test_label_encoder_init():
encoder = LabelEncoder()
assert encoder.parameters == {"positive_label": None}
assert encoder.random_seed == 0
def test_label_encoder_fit_transform_y_is_None():
X = pd.DataFrame({})
y = pd.Series(["a", "b"])
encoder = LabelEncoder()
with pytest.raises(ValueError, match="y cannot be None"):
encoder.fit(X)
encoder.fit(X, y)
with pytest.raises(ValueError, match="y cannot be None"):
encoder.inverse_transform(None)
def test_label_encoder_transform_y_is_None():
X = pd.DataFrame({})
y = pd.Series(["a", "b"])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X)
assert_frame_equal(X, X_t)
assert y_t is None
def test_label_encoder_fit_transform_with_numeric_values_does_not_encode():
X = pd.DataFrame({})
# binary
y = pd.Series([0, 1, 1, 1, 0])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y, y_t)
# multiclass
X = pd.DataFrame({})
y = pd.Series([0, 1, 1, 2, 0, 2])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y, y_t)
def test_label_encoder_fit_transform_with_numeric_values_needs_encoding():
X = pd.DataFrame({})
# binary
y = pd.Series([2, 1, 2, 1])
y_expected = pd.Series([1, 0, 1, 0])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
# multiclass
y = pd.Series([0, 1, 1, 3, 0, 3])
y_expected = pd.Series([0, 1, 1, 2, 0, 2])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
def test_label_encoder_fit_transform_with_categorical_values():
X = pd.DataFrame({})
# binary
y = pd.Series(["b", "a", "b", "b"])
y_expected = pd.Series([1, 0, 1, 1])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
# multiclass
y = pd.Series(["c", "a", "b", "c", "d"])
y_expected = pd.Series([2, 0, 1, 2, 3])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
def test_label_encoder_fit_transform_equals_fit_and_transform():
X = pd.DataFrame({})
y = pd.Series(["a", "b", "c", "a"])
encoder = LabelEncoder()
X_fit_transformed, y_fit_transformed = encoder.fit_transform(X, y)
encoder_duplicate = LabelEncoder()
encoder_duplicate.fit(X, y)
X_transformed, y_transformed = encoder_duplicate.transform(X, y)
assert_frame_equal(X_fit_transformed, X_transformed)
assert_series_equal(y_fit_transformed, y_transformed)
def test_label_encoder_inverse_transform():
X = pd.DataFrame({})
y = pd.Series(["a", "b", "c", "a"])
y_expected = ww.init_series(y)
encoder = LabelEncoder()
_, y_fit_transformed = encoder.fit_transform(X, y)
y_inverse_transformed = encoder.inverse_transform(y_fit_transformed)
assert_series_equal(y_expected, y_inverse_transformed)
y_encoded = pd.Series([1, 0, 2, 1])
y_expected = ww.init_series(pd.Series(["b", "a", "c", "b"]))
y_inverse_transformed = encoder.inverse_transform(y_encoded)
assert_series_equal(y_expected, y_inverse_transformed)
def test_label_encoder_with_positive_label_multiclass_error():
y = pd.Series(["a", "b", "c", "a"])
encoder = LabelEncoder(positive_label="a")
with pytest.raises(
ValueError,
match="positive_label should only be set for binary classification targets",
):
encoder.fit(None, y)
def test_label_encoder_with_positive_label_missing_from_input():
y = pd.Series(["a", "b", "a"])
encoder = LabelEncoder(positive_label="z")
with pytest.raises(
ValueError,
match="positive_label was set to `z` but was not found in the input target data.",
):
encoder.fit(None, y)
@pytest.mark.parametrize(
"y, positive_label, y_encoded_expected",
[
(
pd.Series([True, False, False, True]),
False,
pd.Series([0, 1, 1, 0]),
), # boolean
(
pd.Series([True, False, False, True]),
True,
pd.Series([1, 0, 0, 1]),
), # boolean
(
pd.Series([0, 1, 1, 0]),
0,
pd.Series([1, 0, 0, 1]),
), # int, 0 / 1, encoding should flip
(
pd.Series([0, 1, 1, 0]),
1,
pd.Series([0, 1, 1, 0]),
), # int, 0 / 1, encoding should not change
(
pd.Series([6, 2, 2, 6]),
6,
pd.Series([1, 0, 0, 1]),
), # ints, not 0 / 1, encoding should not change
(
pd.Series([6, 2, 2, 6]),
2,
pd.Series([0, 1, 1, 0]),
), # ints, not 0 / 1, encoding should flip
(pd.Series(["b", "a", "a", "b"]), "a", pd.Series([0, 1, 1, 0])), # categorical
(pd.Series(["b", "a", "a", "b"]), "b", pd.Series([1, 0, 0, 1])), # categorical
],
)
def test_label_encoder_with_positive_label(y, positive_label, y_encoded_expected):
encoder = LabelEncoder(positive_label=positive_label)
_, y_fit_transformed = encoder.fit_transform(None, y)
assert_series_equal(y_encoded_expected, y_fit_transformed)
y_inverse_transformed = encoder.inverse_transform(y_fit_transformed)
assert_series_equal(ww.init_series(y), y_inverse_transformed)
def test_label_encoder_with_positive_label_fit_different_from_transform():
encoder = LabelEncoder(positive_label="a")
y = pd.Series(["a", "b", "b", "a"])
encoder.fit(None, y)
with pytest.raises(ValueError, match="y contains previously unseen labels"):
encoder.transform(None, pd.Series(["x", "y", "x"]))
@pytest.mark.parametrize("use_positive_label", [True, False])
def test_label_encoder_transform_does_not_have_all_labels(use_positive_label):
encoder = LabelEncoder(positive_label="a" if use_positive_label else None)
y = pd.Series(["a", "b", "b", "a"])
encoder.fit(None, y)
expected = (
pd.Series([1, 1, 1, 1]) if use_positive_label else pd.Series([0, 0, 0, 0])
)
_, y_transformed = encoder.transform(None, pd.Series(["a", "a", "a", "a"]))
assert_series_equal(expected, y_transformed)
def test_label_encoder_with_positive_label_with_custom_indices():
encoder = LabelEncoder(positive_label="a")
y = pd.Series(["a", "b", "a"])
encoder.fit(None, y)
y_with_custom_indices = pd.Series(["b", "a", "a"], index=[5, 6, 7])
_, y_transformed = encoder.transform(None, y_with_custom_indices)
|
assert_index_equal(y_with_custom_indices.index, y_transformed.index)
|
pandas.testing.assert_index_equal
|
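For reference, a minimal usage sketch of the behaviour these tests exercise, assuming evalml is installed; positive_label pins which class is encoded as 1.

import pandas as pd
from evalml.pipelines.components import LabelEncoder

encoder = LabelEncoder(positive_label='a')
_, y_encoded = encoder.fit_transform(None, pd.Series(['a', 'b', 'b', 'a']))
print(y_encoded.tolist())                             # [1, 0, 0, 1]
print(encoder.inverse_transform(y_encoded).tolist())  # ['a', 'b', 'b', 'a']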
"""
Python 3.9 дополнительная функция для более привлекательной визуализации доски
Название файла visualize_board_c4.py
Version: 0.1
Author: <NAME>
Date: 2021-12-20
"""
#!/usr/bin/env python
import matplotlib.pyplot as plt
from matplotlib.table import Table
import pandas as pd
import numpy as np
def view_board(np_data, fmt='{:s}', bkg_colors=['pink', 'pink']):
data =
|
pd.DataFrame(np_data, columns=['0','1','2','3','4','5','6'])
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from powersimdata.design.mimic_grid import mimic_generation_capacity
from powersimdata.input.grid import Grid
from powersimdata.network.model import area_to_loadzone
from powersimdata.scenario.scenario import Scenario
def _check_solar_fraction(solar_fraction):
"""Checks that the solar_fraction is between 0 and 1, or is None.
    :param float solar_fraction: desired solar fraction for new capacity.
:raises TypeError: if type is not int, float, or None.
:raises ValueError: if value is not between 0 and 1.
"""
if solar_fraction is None:
pass
elif isinstance(solar_fraction, (int, float)):
if not (0 <= solar_fraction <= 1):
raise ValueError("solar_fraction must be between 0 and 1")
else:
raise TypeError("solar_fraction must be int/float or None")
def _apply_zone_scale_factor_to_ct(ct, fuel, zone_id, scale_factor):
"""Applies a zone scaling factor to a change table, creating internal
change table structure as necessary. New keys are added, existing keys are
multiplied.
:param dict ct: a dictionary of scale factors, with structure matching
ct from powersimdata.input.change_table.ChangeTable.
:param str fuel: the fuel to be scaled.
:param int zone_id: the zone_id to be scaled.
:param int/float scale_factor: how much the zone should be scaled up by.
"""
if fuel not in ct:
ct[fuel] = {}
if "zone_id" not in ct[fuel]:
ct[fuel]["zone_id"] = {}
if zone_id not in ct[fuel]["zone_id"]:
ct[fuel]["zone_id"][zone_id] = scale_factor
else:
ct[fuel]["zone_id"][zone_id] *= scale_factor
def load_targets_from_csv(filename, drop_ignored=True):
"""Interprets a CSV file as a set of targets, ensuring that required columns are present,
and filling in default values for optional columns.
:param str filename: filepath to targets csv.
:param bool drop_ignored: if True, drop all ignored columns from output.
:return: (*pandas.DataFrame*) -- DataFrame of targets from csv file
:raises TypeError: if filename is not a string
:raises ValueError: if one or more required columns is missing.
"""
# Constants
mandatory_columns = {
"region_name",
"ce_target_fraction",
}
optional_column_defaults = {
"allowed_resources": "solar, wind",
"external_ce_addl_historical_amount": 0,
"solar_percentage": np.nan,
"area_type": np.nan,
}
# Validate input
if not isinstance(filename, str):
raise TypeError("filename must be a str")
# Interpret as object so that we can fillna() with a mixed-type dict
raw_targets = pd.read_csv(filename).astype(object)
raw_columns = set(raw_targets.columns)
if not mandatory_columns <= raw_columns:
missing_columns = mandatory_columns - raw_columns
raise ValueError(f'Missing columns: {", ".join(missing_columns)}')
raw_targets.set_index("region_name", inplace=True)
# Report which columns are used vs. unused
ignored_columns = raw_columns - mandatory_columns - optional_column_defaults.keys()
print(f"ignoring: {ignored_columns}")
if drop_ignored:
raw_targets.drop(ignored_columns, axis=1, inplace=True)
for column in optional_column_defaults.keys():
# Fill optional columns that are missing entirely
if column not in raw_columns:
raw_targets[column] = np.nan
# Fill any empty cells within optional columns
raw_targets.fillna(value=optional_column_defaults, inplace=True)
return raw_targets
def _make_zonename2target(grid, targets):
"""Creates a dictionary of {zone_name: target_name} pairs.
:param powersimdata.input.grid.Grid grid: Grid instance defining the set of zones.
:param pandas.DataFrame targets: a dataframe used to look up constituent zones.
:return: (*dict*) -- a dictionary of {zone_name: target_name} pairs.
:raises ValueError: if a zone is not present in any target areas, or
if a zone is present in more than one target area.
"""
grid_model = grid.grid_model
target_zones = {
target_name: area_to_loadzone(grid_model, target_name)
if pd.isnull(targets.loc[target_name, "area_type"])
else area_to_loadzone(
grid_model, target_name, targets.loc[target_name, "area_type"]
)
for target_name in targets.index.tolist()
}
# Check for any collisions
zone_sets = target_zones.values()
if len(set.union(*zone_sets)) != sum([len(t) for t in zone_sets]):
zone_sets_list = [zone for _set in zone_sets for zone in _set]
duplicates = {zone for zone in zone_sets_list if zone_sets_list.count(zone) > 1}
error_areas = {
zone: {area for area, zone_set in target_zones.items() if zone in zone_set}
for zone in duplicates
}
error_msgs = [f"{k} within: {', '.join(v)}" for k, v in error_areas.items()]
raise ValueError(f"Zone(s) within multiple area! {'; '.join(error_msgs)}")
zonename2target = {}
for target_name, zone_set in target_zones.items():
# Filter out parts of states not in the interconnect(s) in this Grid
filtered_zone_set = zone_set & set(grid.zone2id.keys())
zonename2target.update({zone: target_name for zone in filtered_zone_set})
untargetted_zones = set(grid.zone2id.keys()) - set(zonename2target.keys())
if len(untargetted_zones) > 0:
err_msg = f"Targets do not cover all load zones. Missing: {untargetted_zones}"
raise ValueError(err_msg)
return zonename2target
def _get_scenario_length(scenario):
"""Get the number of hours in a scenario.
:param powersimdata.scenario.scenario.Scenario scenario: A Scenario instance.
:return: (*int*) -- the number of hours in the scenario.
"""
if not isinstance(scenario, Scenario):
raise TypeError("next_scenario must be a Scenario object")
if scenario.state.name == "create":
start_ts = pd.Timestamp(scenario.state.builder.start_date)
end_ts = pd.Timestamp(scenario.state.builder.end_date)
else:
start_ts =
|
pd.Timestamp(scenario.info["start_date"])
|
pandas.Timestamp
|
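A small sketch of the optional-column defaulting that load_targets_from_csv performs, shown on an illustrative in-memory targets table instead of a CSV file.

import numpy as np
import pandas as pd

# illustrative targets table with only the two mandatory columns present
targets = pd.DataFrame(
    {"region_name": ["Western", "Texas"], "ce_target_fraction": [0.3, 0.25]}
).astype(object).set_index("region_name")

optional_column_defaults = {
    "allowed_resources": "solar, wind",
    "external_ce_addl_historical_amount": 0,
    "solar_percentage": np.nan,
    "area_type": np.nan,
}
# add any missing optional columns, then fill empty cells with defaults (as the loader does)
for column in optional_column_defaults:
    if column not in targets.columns:
        targets[column] = np.nan
targets.fillna(value=optional_column_defaults, inplace=True)
print(targets)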
import os
import logging
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from prophet import Prophet
from sklearn import metrics # gcp cloud function deploy failed due to import sklearn...
from models.model_abc import Model
# https://github.com/facebook/prophet/issues/223
class suppress_stdout_stderr(object):
'''
A context manager for doing a "deep suppression" of stdout and stderr in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
This will not suppress raised exceptions, since exceptions are printed
to stderr just before a script exits, and after the context manager has
exited (at least, I think that is why it lets exceptions through).
'''
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = (os.dup(1), os.dup(2))
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
# Close the null files
os.close(self.null_fds[0])
os.close(self.null_fds[1])
class LibFBProphet(Model):
enable_plot = False
train_ratio = 0.9
def __init__(self):
logging.getLogger('fbprophet').setLevel(logging.WARNING)
# data = {
# 'args': {
# 'using_regressors': ['Open', 'High', 'Low', 'Volume']
# 'forecast_periods': 30 # use for run_predict
# 'training_ratio': 0.9 # use for run_validate
# }
# 'target_data': {
# 'name': 'name'
# 'data': obj # dataframe
# 'file_path': '{data path}' # use it if no data key
# 'type': 'stock' # stock or market
# },
# 'feature_data': [
# {
# 'using_regressors': ['Open', 'High', 'Low', 'Volume']
# 'name': 'name'
# 'data': obj # dataframe
# 'file_path': '{data path}' # use it if no data key
# 'type': 'stock' # stock or market
# }
# ]
# }
@staticmethod
def __load_data(target_or_feature_data):
if 'data' not in target_or_feature_data:
if target_or_feature_data['type'] == 'stock':
return pd.read_json(target_or_feature_data['file_path'], orient='records')
elif target_or_feature_data['type'] == 'market':
with open(target_or_feature_data['file_path'], 'r', encoding='utf-8') as f:
market_data = json.loads(f.read())
market_data_records = json.dumps(market_data['data'])
return pd.read_json(market_data_records, orient='records')
            else:
                logging.error('unsupported data type')
                return None
        return target_or_feature_data['data']
def run_validate(self, data):
logging.debug(data['args'])
if 'enable_plot' in data['args']:
self.enable_plot = data['args']['enable_plot']
if 'train_ratio' in data['args']:
self.train_ratio = data['args']['train_ratio']
using_regressors = data['args']['using_regressors']
name = data['target_data']['name']
# load target_data with df
df_data = LibFBProphet.__load_data(data['target_data'])
for feature_data in data['feature_data']:
df_feat_data = LibFBProphet.__load_data(feature_data)
for col in df_feat_data.columns:
if col in feature_data['using_regressors']:
new_col_name = 'feat_' + feature_data['name'] + '_' + col
using_regressors.append(new_col_name)
df_feat_data.rename(columns={col: new_col_name}, inplace=True)
df_data = df_data.merge(df_feat_data, on='Date', how='left').dropna()
# reverse data order from latest start -> oldest start
df = df_data[::-1]
df.rename(columns={'Date': 'ds', 'Close': 'y'}, inplace=True)
train_size = int(df.shape[0] * self.train_ratio)
train_data = df[0:train_size]
test_data = df[train_size:df.shape[0]]
forecast_with_org_data = self.__run_model(train_data, using_regressors, df.shape[0] - train_size, name)
if self.enable_plot:
plt.show()
logging.info("MSE: {}".format(
metrics.mean_squared_error(forecast_with_org_data['yhat'][train_size:df.shape[0]], test_data['y'])))
logging.info("MAE: {}".format(
metrics.mean_absolute_error(forecast_with_org_data['yhat'][train_size:df.shape[0]], test_data['y'])))
return NotImplemented
def run_predict(self, data):
logging.debug(data['args'])
if 'enable_plot' in data['args']:
self.enable_plot = data['args']['enable_plot']
using_regressors = data['args']['using_regressors']
forecast_periods = data['args']['forecast_periods']
name = data['target_data']['name']
# load target_data with df
df_data = LibFBProphet.__load_data(data['target_data'])
for feature_data in data['feature_data']:
df_feat_data = LibFBProphet.__load_data(feature_data)
for col in df_feat_data.columns:
if col in feature_data['using_regressors']:
new_col_name = 'feat_' + feature_data['name'] + '_' + col
using_regressors.append(new_col_name)
df_feat_data.rename(columns={col: new_col_name}, inplace=True)
df_data = df_data.merge(df_feat_data, on='Date', how='left').dropna()
# reverse data order from latest start -> oldest start
df = df_data[::-1]
df.rename(columns={'Date': 'ds', 'Close': 'y'}, inplace=True)
forecast_with_org_data = self.__run_model(df, using_regressors, forecast_periods, name)
if self.enable_plot:
plt.show()
# rename
final_forecast = forecast_with_org_data.reset_index()
final_forecast.rename(
columns={'ds': 'Date', 'y': 'Close',
'yhat': 'Predict', 'yhat_upper': 'Predict_Upper', 'yhat_lower': 'Predict_Lower',
'trend': 'Trend', 'trend_upper': 'Trend_Upper', 'trend_lower': 'Trend_Lower'}, inplace=True)
return final_forecast
def __run_model(self, df_data, using_regressors, forecast_periods, name):
m = Prophet()
df_log = df_data.copy()
df_log['y'] = np.log(df_data['y'])
regressors = {}
for r in using_regressors:
if r in df_data.columns.values:
o = LibFBProphet.__predict_single_var_future(df_data[['ds', r]].copy(), r, forecast_periods)
regressors[name + '_' + r] = pd.concat([df_data[r], o], ignore_index=True)
df_log[name + '_' + r] = np.log(df_data[r])
m.add_regressor(name + '_' + r)
with suppress_stdout_stderr():
m.fit(df_log)
future = m.make_future_dataframe(periods=forecast_periods)
for r in using_regressors:
if r in df_data.columns.values:
future[name + '_' + r] = np.log(regressors[name + '_' + r])
forecast = m.predict(future)
logging.debug(forecast)
logging.debug(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail())
train_close = pd.DataFrame(df_data[['ds', 'y']]).set_index('ds')
forecast_with_org_data = forecast.set_index('ds').join(train_close)
forecast_with_org_data = forecast_with_org_data[['y', 'yhat', 'yhat_upper', 'yhat_lower', 'trend', 'trend_upper', 'trend_lower']]
forecast_with_org_data['yhat'] = np.exp(forecast_with_org_data.yhat)
forecast_with_org_data['yhat_upper'] = np.exp(forecast_with_org_data.yhat_upper)
forecast_with_org_data['yhat_lower'] = np.exp(forecast_with_org_data.yhat_lower)
if self.enable_plot:
m.plot(forecast)
m.plot_components(forecast)
forecast_with_org_data[['y', 'yhat', 'yhat_upper', 'yhat_lower']].plot(figsize=(8, 6))
return forecast_with_org_data
@staticmethod
def __predict_single_var_future(df_data, header_name, forecast_periods):
df_data.rename(columns={header_name: 'y'}, inplace=True)
df_log = df_data.copy()
df_log['y'] = np.log(df_data['y'])
m = Prophet()
with suppress_stdout_stderr():
m.fit(df_log)
future = m.make_future_dataframe(periods=forecast_periods)
forecast = m.predict(future)
logging.debug(forecast.head())
logging.debug(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail())
df_close =
|
pd.DataFrame(df_data[['ds', 'y']])
|
pandas.DataFrame
|
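A minimal usage sketch for the data dict documented in the comment block above, with a synthetic price frame and no extra regressors; it assumes prophet is installed and that __load_data hands back the inline dataframe when a 'data' key is supplied.

import numpy as np
import pandas as pd

# synthetic price history standing in for the stock JSON described above
history = pd.DataFrame({
    'Date': pd.date_range('2021-01-01', periods=60, freq='D'),
    'Close': np.linspace(100.0, 130.0, 60),
})[::-1]  # stored latest-first, as the loader expects

data = {
    'args': {'using_regressors': [], 'forecast_periods': 14, 'enable_plot': False},
    'target_data': {'name': 'demo', 'data': history, 'type': 'stock'},
    'feature_data': [],
}

model = LibFBProphet()
forecast = model.run_predict(data)
print(forecast[['Date', 'Predict']].tail())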
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm.auto import tqdm
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
from sklearn.metrics import roc_auc_score
import lightgbm as lgb
from catboost import CatBoostClassifier
import warnings
warnings.filterwarnings('ignore')
# Feature Engineering Func
def trans_issueDate(issueDate):
year, month, day = issueDate.split('-')
return int(year)*12 + int(month)
def get_issueDate_day(issueDate):
year, month, day = issueDate.split('-')
return int(day)
def trans_earliesCreditLine(earliesCreditLine):
month_dict = {"Jan":1, "Feb":2, "Mar":3, "Apr":4, "May":5, "Jun":6, \
"Jul":7, "Aug":8, "Sep":9, "Oct":10, "Nov":11, "Dec":12}
month, year = earliesCreditLine.split('-')
month = month_dict[month]
return int(year)*12 + month
def trans_employmentLength_num(employmentLength):
if employmentLength=='10+ years':
return 15
elif employmentLength=='< 1 year':
return 0
else:
return str(employmentLength)[:2]
employmentLength_dict = {'1 year':1,'10+ years':10,'2 years':2,'3 years':3,'4 years':4,
'5 years':5,'6 years':6,'7 years':7,'8 years':8,'9 years':9,'< 1 year':0}
cate_features = [
'term',
'grade',
'subGrade',
'employmentTitle',
'employmentLength',
'homeOwnership',
'verificationStatus',
'purpose',
'delinquency_2years',
'earliesCreditLine',
'postCode',
'regionCode',
'title',
'issueDate',
# bins_10
'loanAmnt_bin',
'annualIncome_bin',
# bins_100
'interestRate_bin',
'dti_bin',
'installment_bin',
'revolBal_bin',
'revolUtil_bin'
]
def gen_new_feats(train, test):
train['earliesCreditLine'] = train['earliesCreditLine'].apply(lambda x: trans_earliesCreditLine(x))
    test['earliesCreditLine'] = test['earliesCreditLine'].apply(lambda x: trans_earliesCreditLine(x))
# Step 1: concat train & test -> data
data = pd.concat([train, test])
# Step 2.1 : Feature Engineering Part 1
print('LabelEncoder...')
encoder = LabelEncoder()
data['grade'] = encoder.fit_transform(data['grade'])
data['subGrade'] = encoder.fit_transform(data['subGrade'])
data['postCode'] = encoder.fit_transform(data['postCode'])
data['employmentTitle'] = encoder.fit_transform(data['employmentTitle'])
print('generate new features...')
# data['employmentLength'] = data['employmentLength'].apply(lambda x: trans_employmentLength_num(x))
data['employmentLength'] = data['employmentLength'].apply(lambda x: x if x not in employmentLength_dict else employmentLength_dict[x])
data['issueDate_Day'] = data['issueDate'].apply(lambda x: get_issueDate_day(x))
data['issueDate'] = data['issueDate'].apply(lambda x: trans_issueDate(x))
    data['date_Diff'] = data['issueDate'] - data['earliesCreditLine']  # months between issue date and earliest credit line
data['debt'] = data['dti'] * data['annualIncome']
data['acc_ratio'] = data['openAcc'] / (data['openAcc'] + 0.1)
data['revolBal_annualIncome_r'] = data['revolBal'] / (data['annualIncome'] + 0.1)
data['revolTotal'] = 100*data['revolBal'] / (100 - data['revolUtil'])
data['pubRec_openAcc_r'] = data['pubRec'] / (data['openAcc'] + 0.1)
data['pubRec_totalAcc_r'] = data['pubRec'] / (data['totalAcc'] + 0.1)
# step2.2: Binning
print('Binning...')
bin_nums = 10
bin_labels = [i for i in range(bin_nums)]
binning_features = ['loanAmnt', 'annualIncome']
for f in binning_features:
data['{}_bin'.format(f)] = pd.qcut(data[f], bin_nums, labels=bin_labels).astype(np.float64)
bin_nums = 50
bin_labels = [i for i in range(bin_nums)]
binning_features = ['interestRate', 'dti', 'installment', 'revolBal','revolUtil']
for f in binning_features:
data['{}_bin'.format(f)] = pd.qcut(data[f], bin_nums, labels=bin_labels).astype(np.float64)
for f in cate_features:
data[f] = data[f].fillna(0).astype('int')
return data[data['isDefault'].notnull()], data[data['isDefault'].isnull()]
def gen_target_encoding_feats(train, test, encode_cols, target_col, n_fold=10):
    '''Generate target-encoding features'''
# for training set - cv
tg_feats = np.zeros((train.shape[0], len(encode_cols)))
kfold = StratifiedKFold(n_splits=n_fold, random_state=2021, shuffle=True)
for _, (train_index, val_index) in enumerate(kfold.split(train[encode_cols], train[target_col])):
df_train, df_val = train.iloc[train_index], train.iloc[val_index]
for idx, col in enumerate(encode_cols):
target_mean_dict = df_train.groupby(col)[target_col].mean()
df_val[f'{col}_mean_target'] = df_val[col].map(target_mean_dict)
tg_feats[val_index, idx] = df_val[f'{col}_mean_target'].values
for idx, encode_col in enumerate(encode_cols):
train[f'{encode_col}_mean_target'] = tg_feats[:, idx]
# for testing set
for col in encode_cols:
target_mean_dict = train.groupby(col)[target_col].mean()
test[f'{col}_mean_target'] = test[col].map(target_mean_dict).astype(np.float64)
return train, test
encoding_cate_features = [
'term',
'grade',
'subGrade',
'employmentTitle',
'employmentLength',
'homeOwnership',
'verificationStatus',
'purpose',
'delinquency_2years',
'earliesCreditLine',
'postCode',
'regionCode',
'title',
'issueDate',
# bins_10
'loanAmnt_bin', 'annualIncome_bin',
# bins_100
'interestRate_bin', 'dti_bin', 'installment_bin', 'revolBal_bin','revolUtil_bin'
]
TRAIN_FEAS = [
#'id',
'loanAmnt',
'term',
'interestRate',
'installment',
'grade',
'subGrade',
'employmentTitle',
'employmentLength',
'homeOwnership',
'annualIncome',
'verificationStatus',
'issueDate',
'purpose',
'postCode',
'regionCode',
'dti',
'delinquency_2years',
'ficoRangeLow',
# 'ficoRangeHigh',
'openAcc',
'pubRec',
'pubRecBankruptcies',
'revolBal',
'revolUtil',
'totalAcc',
'initialListStatus',
'applicationType',
'earliesCreditLine',
'title',
'policyCode',
'n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8',
'n9', 'n10', 'n11', 'n12', 'n13', 'n14',
'issueDate_Day', 'date_Diff','debt', 'acc_ratio',
'revolBal_annualIncome_r', 'revolTotal','pubRec_openAcc_r',
'pubRec_totalAcc_r', 'loanAmnt_bin','annualIncome_bin',
'interestRate_bin', 'dti_bin', 'installment_bin',
'revolBal_bin', 'revolUtil_bin', 'term_mean_target',
'grade_mean_target', 'subGrade_mean_target',
'employmentTitle_mean_target', 'employmentLength_mean_target',
'homeOwnership_mean_target', 'verificationStatus_mean_target',
'purpose_mean_target', 'delinquency_2years_mean_target',
'earliesCreditLine_mean_target', 'postCode_mean_target',
'regionCode_mean_target', 'title_mean_target', 'issueDate_mean_target',
'loanAmnt_bin_mean_target', 'annualIncome_bin_mean_target',
'interestRate_bin_mean_target', 'dti_bin_mean_target',
'installment_bin_mean_target', 'revolBal_bin_mean_target',
'revolUtil_bin_mean_target'
]
cate_features=[
# 'term',
# 'grade',
# 'subGrade',
# 'employmentTitle',
# 'employmentLength',
# 'homeOwnership',
# 'verificationStatus',
# 'purpose',
# 'delinquency_2years',
# 'earliesCreditLine',
# 'postCode',
# 'regionCode',
# 'title',
# 'issueDate',
# 'loanAmnt_bin',
# 'annualIncome_bin',
# 'interestRate_bin',
# 'dti_bin',
# 'installment_bin',
# 'revolBal_bin',
# 'revolUtil_bin'
]
seed0=2021
lgb_param = {
    'objective': 'binary', # user-defined
'metric':'auc',
'boosting_type': 'gbdt',
# 'max_bin':100,
# 'min_data_in_leaf':500,
'learning_rate': 0.05,
'subsample': 0.82,
'subsample_freq': 1,
'feature_fraction': 0.88,
'lambda_l1': 6.1,
'lambda_l2': 1.3,
'max_depth':13,
'min_child_weight': 18.5,
'min_data_in_leaf': 97,
'min_gain_to_split': 0.057,
'num_leaves':24,
# 'categorical_column':[0], # stock_id
'seed':seed0,
'feature_fraction_seed': seed0,
'bagging_seed': seed0,
'drop_seed': seed0,
'data_random_seed': seed0,
'n_jobs':-1,
# 'device':'cuda',
'verbose': -1}
cat_params = {
'iterations': 50000, # 3000
'depth':6,
'l2_leaf_reg':5,
'learning_rate': 0.02, # 0.05
'loss_function':'CrossEntropy',
'eval_metric': 'AUC',
'task_type':'GPU',
'random_seed': 2021,
"early_stopping_rounds": 200,
'verbose':100,
# 'logging_level': 'Silent',
'use_best_model': True,
}
def train_and_evaluate_lgb(train, test, params, split_seed):
    # Hyperparameters (just basic)
features = TRAIN_FEAS
print('features num: ', len(features))
print('cate features num: ', len(cate_features))
y = train['isDefault']
oof_predictions = np.zeros(train.shape[0])
test_predictions = np.zeros(test.shape[0])
kfold = KFold(n_splits = 5, random_state = split_seed, shuffle = True)
for fold, (trn_ind, val_ind) in enumerate(kfold.split(train)):
print(f'Training fold {fold + 1}')
x_train, x_val = train.iloc[trn_ind], train.iloc[val_ind]
y_train, y_val = y.iloc[trn_ind], y.iloc[val_ind]
train_dataset = lgb.Dataset(x_train[features], y_train)
val_dataset = lgb.Dataset(x_val[features], y_val)
model = lgb.train(params = params,
num_boost_round = 10000, # 1000
categorical_feature=cate_features,
train_set = train_dataset,
valid_sets = [train_dataset, val_dataset],
verbose_eval = 200,
early_stopping_rounds=150, # 50
)
# Add predictions to the out of folds array
oof_predictions[val_ind] = model.predict(x_val[features])
# Predict the test set
test_predictions += model.predict(test[features]) / 5
score = roc_auc_score(y, oof_predictions)
print(f'Our out of folds roc_auc is {score}')
return test_predictions
def train_and_evaluate_cat(train, test, params, split_seed):
    # Hyperparameters (just basic)
y = train['isDefault']
features = TRAIN_FEAS
oof_predictions = np.zeros(train.shape[0])
test_predictions = np.zeros(test.shape[0])
nsplits = 5
kfold = KFold(n_splits = nsplits, random_state = split_seed, shuffle = True)
for fold, (trn_ind, val_ind) in enumerate(kfold.split(train)):
print(f'Training fold {fold + 1}')
x_train, x_val = train[features].iloc[trn_ind], train[features].iloc[val_ind]
y_train, y_val = y.iloc[trn_ind], y.iloc[val_ind]
model = CatBoostClassifier(**params)
model.fit(x_train,
y_train,
eval_set=(x_val,y_val),
# eval_set=(test[:150000][features],test_a_tgt),
cat_features=cate_features,
use_best_model=True,
verbose=200
)
# Add predictions to the out of folds array
oof_predictions[val_ind] = model.predict_proba(x_val[features])[:,1]
# Predict the test set
# test_predictions += model.predict_proba(test[features])[:,1] / nsplits
test_predictions += model.predict_proba(test[features])[:,1] / nsplits
score = roc_auc_score(y, oof_predictions)
print(f'Our out of folds roc_auc is {score}')
return test_predictions
if __name__ == '__main__':
# loading_data
print('data loading...')
train = pd.read_csv('data/train.csv')
test =
|
pd.read_csv('data/test_a.csv')
|
pandas.read_csv
|
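For clarity, a self-contained sketch of the out-of-fold target encoding that gen_target_encoding_feats implements above; the toy frame and column names are illustrative.

import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold

# toy frame; 'grade' is the categorical column and 'isDefault' the binary target, mirroring the script above
df = pd.DataFrame({'grade': list('ABABABAB'), 'isDefault': [1, 0, 1, 0, 0, 1, 1, 0]})

oof = np.zeros(len(df))
kfold = StratifiedKFold(n_splits=2, shuffle=True, random_state=2021)
for train_idx, val_idx in kfold.split(df[['grade']], df['isDefault']):
    fold_means = df.iloc[train_idx].groupby('grade')['isDefault'].mean()
    # each validation row gets the mean target of its category, computed without its own fold
    oof[val_idx] = df.iloc[val_idx]['grade'].map(fold_means).values

df['grade_mean_target'] = oof
print(df)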
import pandas as pd
import os
from sklearn.metrics.pairwise import linear_kernel
from sklearn.feature_extraction.text import TfidfVectorizer
from recommend.models import Product, ProductTag, Estimate
PATH = os.getenv('FILE_PATH')
def make_user_tag_raw_string(user_id):
tags = ''
for estimate in Estimate.objects.all().filter(user_id=user_id).order_by('-estimate_rate')[:5]:
prod = estimate.prod
for product_tag in ProductTag.objects.all().filter(prod=prod):
tag = product_tag.tag.tag_text
tags += tag + ' '
return tags
def make_rec():
products =
|
pd.DataFrame(columns=['id', 'category', 'raw_tag'])
|
pandas.DataFrame
|
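A minimal sketch of the TF-IDF plus linear_kernel recommendation idea that the imports above point at; the product tags and user profile string are made up.

import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel

# illustrative product tag strings; in the app these would come from ProductTag rows
products = pd.DataFrame({
    'id': [1, 2, 3],
    'raw_tag': ['outdoor camping tent', 'camping stove gas', 'office chair ergonomic'],
})
user_tags = 'camping tent gas'  # stands in for make_user_tag_raw_string(user_id)

tfidf = TfidfVectorizer()
matrix = tfidf.fit_transform(products['raw_tag'].tolist() + [user_tags])
# cosine similarity between the user profile (last row) and every product row
products['score'] = linear_kernel(matrix[-1], matrix[:-1]).flatten()
print(products.sort_values('score', ascending=False))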
from ms_learn_crawler import *
import calendar
import time
import pandas as pd
import pickle
import os
data_month = 1
data_year = 2022
f = open("portfolio.config", "r")
portfolio_urls = f.readlines()
cert_info = {}
all_cert_lp_info = pd.DataFrame()
all_cert_module_info = pd.DataFrame()
crawler = ms_learn_crawler()
## Get all the LP info for each cert
cert_lp_pickle_file_name = "../data/"+str(data_month)+"-"+str(data_year)+"-all_cert_lp_info.pkl"
if(os.path.exists(cert_lp_pickle_file_name)):
#read from file to avoid reprocessing
with open(cert_lp_pickle_file_name, 'rb') as file:
        # Call load method to deserialize
all_cert_lp_info = pickle.load(file)
else:
# do the processing
for cert in portfolio_urls:
learn_uids = crawler.get_learn_paths_for_cert(cert)
if len(learn_uids)>0:
lp_metadata = crawler.get_learn_path_metadata(learn_uids)
df = pd.DataFrame(lp_metadata, columns = ['LearningPathUid', 'LiveUrl','TotalModules'])
last_slash = cert.rfind("/")
cert_name = cert[last_slash+1:]
df['Certification'] = cert_name.strip()
if all_cert_lp_info.size == 0:
all_cert_lp_info = df
else:
all_cert_lp_info =
|
pd.concat([all_cert_lp_info,df],sort=False)
|
pandas.concat
|
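The pickle-cache pattern above can be wrapped in a small helper; a generic sketch, with an illustrative function name.

import os
import pickle

def load_or_build(cache_path, build_fn):
    """Reuse a pickled result if it exists, otherwise build it and cache it."""
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as f:
            return pickle.load(f)
    result = build_fn()
    with open(cache_path, 'wb') as f:
        pickle.dump(result, f)
    return result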
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# turn off pink warning boxes
import warnings
warnings.filterwarnings("ignore")
#-----------------------------------------------------------------------------
def clean_flood(flood):
'''Drops unneeded columns from the med center flooding df
Makes sure DateTime is in DateTime format'''
# drop the columns
flood = flood.drop(columns=['LAT', 'LONG', 'Zone',
'SensorStatus', 'AlertTriggered',
'Temp_C', 'Temp_F', 'Vendor'])
# Set to date time format
flood.DateTime = pd.to_datetime(flood.DateTime)
flood = flood.rename(columns={"DateTime": "datetime",
"DistToWL_ft": "sensor_to_water_feet",
"DistToWL_m": "sensor_to_water_meters",
"DistToDF_ft": "sensor_to_ground_feet",
"DistToDF_m": "sensor_to_ground_meters"})
    # replace -999 error codes with the sensor-to-ground baseline values
flood["sensor_to_ground_feet"].replace({-999:13.5006561680}, inplace=True)
flood["sensor_to_ground_meters"].replace({-999:4.115}, inplace=True)
#flood = flood.replace(to_replace=-999, value=0)
# create new features for flood depth
flood['flood_depth_feet'] = flood.sensor_to_ground_feet - flood.sensor_to_water_feet
flood['flood_depth_meters'] = flood.sensor_to_ground_meters - flood.sensor_to_water_meters
# Create new alert
def flood_alert(c):
if 0 < c['flood_depth_feet'] < 0.66667:
return 'No Risk'
elif 0.66667 < c['flood_depth_feet'] < 1.08333:
return 'Minor Risk'
elif 1.08333 < c['flood_depth_feet'] < 2.16667:
return 'Moderate Risk'
elif 2.16667 < c['flood_depth_feet']:
return 'Major Risk !'
else:
return 'No Alert'
flood['flood_alert'] = flood.apply(flood_alert, axis=1)
flood = flood[(flood.sensor_to_water_feet != -999)]
# return new df
return flood
#-----------------------------------------------------------------------------
def clean_air(air):
'''Drops unneeded columns from the air quality df
then handles the nulls in alert triggered column
set to date time format'''
# drop the columns
air = air.drop(columns=['LAT', 'LONG', 'Zone',
'Sensor_id', 'SensorModel',
'SensorStatus', 'Vendor'])
# replace nulls (AlertTriggered) with the string "None"
air.fillna("None", inplace = True)
# set to date time format
air.DateTime = pd.to_datetime(air.DateTime)
# rename features
air = air.rename(columns={"DateTime": "datetime",
"AlertTriggered":"alert_triggered"})
air = air.replace(to_replace=-999, value=0)
# create time series features
air['dates'] = pd.to_datetime(air['datetime']).dt.date
air['time'] = pd.to_datetime(air['datetime']).dt.time
air['hour'] = pd.to_datetime(air['datetime']).dt.hour
air['weekday'] = pd.to_datetime(air['datetime']).dt.weekday
# make all CO bins
air['AQI_CO'] = pd.cut(air.CO,
bins = [-1,4.5,9.5,12.5,15.5,30.5,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
CO_24hr = air.groupby('dates', as_index=False)['CO'].mean()
CO_24hr = CO_24hr.rename(columns={'CO':'CO_24hr'})
air = air.merge(CO_24hr, on = 'dates', how ='left')
air['AQI_CO_24hr'] = pd.cut(air.CO_24hr,
bins = [-1,4.5,9.5,12.5,15.5,30.5,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
air['AQI_pm2_5'] = pd.cut(air.Pm2_5,
bins = [-1,12.1,35.5,55.5,150.5,250.5,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
pm_25_24hr = air.groupby('dates', as_index=False)['Pm2_5'].mean()
pm_25_24hr = pm_25_24hr.rename(columns={'Pm2_5':'Pm_25_24hr'})
air = air.merge(pm_25_24hr, on = 'dates', how ='left')
air['AQI_pm_25_24hr'] = pd.cut(air.Pm_25_24hr,
bins = [-1,12.1,35.5,55.5,150.5,250.5,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
air['AQI_pm10'] = pd.cut(air.Pm10,
bins = [-1,55,154,255,355,425,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
pm_10_24hr = air.groupby('dates', as_index=False)['Pm10'].mean()
pm_10_24hr = pm_10_24hr.rename(columns={'Pm10':'Pm_10_24hr'})
air = air.merge(pm_10_24hr, on = 'dates', how ='left')
air['AQI_pm10_24hr'] = pd.cut(air.Pm_10_24hr,
bins = [-1,55,154,255,355,425,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
return air
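# Hedged illustration (hypothetical readings): how the CO binning above maps
# values to AQI labels. pd.cut bins are open on the left and closed on the
# right, so a reading of exactly 4.5 still counts as 'Good'.
_co = pd.Series([0.3, 4.5, 11.0, 40.0])
pd.cut(_co,
       bins=[-1, 4.5, 9.5, 12.5, 15.5, 30.5, 4000],
       labels=['Good', 'Moderate', 'Unhealthy for Sensitive Groups',
               'Unhealthy', 'Very Unhealthy', 'Hazardous'])
# -> Good, Good, Unhealthy for Sensitive Groups, Hazardous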
#-----------------------------------------------------------------------------
def wrangle_weather(weather):
'''
This function will drop unnecessary columns,
change datetime to a pandas datetime datatype,
and rename columns to be more readable, returning
a clean dataframe.
'''
#read csv and turn into pandas dataframe
sa_weather = pd.read_csv('SA_weather.csv')
# concat sa date and time
sa_weather['Date_Time'] = sa_weather['Date'] + ' ' + sa_weather['Time']
# put into date time format
sa_weather.Date_Time = pd.to_datetime(sa_weather.Date_Time)
# round to nearest hour
sa_weather['DateTime'] = sa_weather['Date_Time'].dt.round('60min')
# set sa weather index
sa_weather = sa_weather.set_index('DateTime')
# drop old datetime
sa_weather = sa_weather.drop(columns=['Date_Time', 'Temp', 'Humidity', 'Barometer'])
# rename
sa_weather = sa_weather.rename(columns={"Time": "time",
"Date": "date",
"Weather": "weather",
"Wind": "wind",
"Visibility": "visibility"})
#drop columns we will not be using
weather.drop(columns=[
'Sensor_id',
'Vendor',
'SensorModel',
'LAT',
'LONG',
'Zone',
'AlertTriggered',
'SensorStatus'], inplace=True)
#rename columns to be more readable
weather = weather.rename(columns={"DateTime": "datetime",
"Temp_C": "celsius",
"Temp_F": "farenheit",
"Humidity": "humidity",
"DewPoint_C": "dewpoint_celsius",
"DewPoint_F": "dewpoint_farenheit",
"Pressure_Pa": "pressure"})
#change datetime to pandas datetime object
weather.datetime = pd.to_datetime(weather.datetime)
# round to hour
weather['DateTime'] = weather['datetime'].dt.round('60min')
# set index
weather = weather.set_index('DateTime')
# join the 2 df's
weather = weather.join(sa_weather, how='right')
# replace -999 with 0
weather = weather.replace(to_replace=-999, value=0)
# drop nulls
weather.dropna(inplace = True)
#return clean weather df
return weather
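# Hedged illustration (hypothetical timestamps): the 60-minute rounding above
# is what puts the sensor readings and the scraped SA weather rows on a shared
# hourly index before the join.
_ts = pd.Series(pd.to_datetime(['2020-06-01 13:29:59', '2020-06-01 13:31:00']))
_ts.dt.round('60min')
# -> 2020-06-01 13:00:00, 2020-06-01 14:00:00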
#-----------------------------------------------------------------------------
def wrangle_sound(df):
'''
This function drops unnecessary columns and
converts the 'DateTime' column to a datetime
object
'''
# Drops unnecessary columns
df = df.drop(columns = ['SensorStatus', 'AlertTriggered', 'Zone', 'LONG',
'LAT', 'SensorModel', 'Vendor', 'Sensor_id'])
# Converts to datetime
df['DateTime'] = pd.to_datetime(df.DateTime)
# make noise level feature
df['how_loud'] = pd.cut(df.NoiseLevel_db,
bins = [-1,46,66,81,101,4000],
labels = ['Normal', 'Moderate',
'Loud', "Very Loud",
"Extremely Loud"])
def sound_alert(c):
        # check the stricter threshold first; otherwise the > 120 dB branch is unreachable
        if c['NoiseLevel_db'] > 120:
            return 'Major Risk'
        elif c['NoiseLevel_db'] > 80:
            return 'Minor Risk'
else:
return 'No Alert'
df['sound_alert'] = df.apply(sound_alert, axis=1)
return df
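# Hedged sketch (alternative, not in the original): the same alert logic with
# np.select instead of a row-wise apply; conditions are evaluated in order, so
# the stricter 120 dB threshold is listed before the 80 dB one.
_db = pd.Series([60, 95, 130])   # hypothetical noise readings in dB
np.select([_db > 120, _db > 80], ['Major Risk', 'Minor Risk'], default='No Alert')
# -> ['No Alert', 'Minor Risk', 'Major Risk']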
#-----------------------------------------------------------------------------
def full_daily_downtown_COSA_dataframe():
'''
This function takes in all COSA dataframes,
averages them by day, then joins them all together
using the day as a primary key
'''
# Pulls sound CSV and sets datetime as index, then orders it
df = pd.read_csv('downtown_sound.csv')
sound_df = wrangle_sound(df)
sound_df = sound_df.set_index('DateTime')
sound_df = sound_df.sort_index()
# Pulls flood CSV and sets datetime as index
flood = pd.read_csv('downtown_flood.csv')
flood_df = clean_flood(flood)
flood_df = flood_df.set_index('datetime')
# Pulls weather CSV
weather = pd.read_csv('downtown_weather.csv')
weather_df = wrangle_weather(weather)
# Pulls air CSV, sets datetime column to datetime object, sets it as an index, then sorts it
air = pd.read_csv('downtown_air.csv')
air_df = clean_air(air)
air_df.datetime = pd.to_datetime(air_df.datetime)
air_df = air_df.set_index('datetime')
air_df = air_df.sort_index()
# Resamples each dataframe by the day using mean, and drops unnecessary columns from air_df
weather_day_df = weather_df.resample('D', on='datetime').mean()
flood_day_df = flood_df.resample('D').mean()
sound_day_df = sound_df.resample('D').mean()
air_day_df = air_df.resample('D').mean().drop(columns = ['hour', 'weekday', 'CO_24hr', 'Pm_25_24hr', 'Pm_10_24hr', 'SO2', 'O3', 'NO2'])
# Creating series for each pollutant
air2_5 = air_df.drop(air_df.columns.difference(['Pm2_5', 'AQI_pm2_5']), 1)
air10 = air_df.drop(air_df.columns.difference(['Pm10', 'AQI_pm10']), 1)
airCO = air_df.drop(air_df.columns.difference(['CO', 'AQI_CO']), 1)
# Pull most hazardous levels of pollution for each day
series2_5 = air2_5.resample('D').max().rename(columns = {'AQI_pm2_5': 'most_hazardous_pm2.5_level'})['most_hazardous_pm2.5_level']
series10 = air10.resample('D').max().rename(columns = {'AQI_pm10': 'most_hazardous_pm10_level'})['most_hazardous_pm10_level']
seriesCO = airCO.resample('D').max().rename(columns = {'AQI_CO': 'most_hazardous_CO_level'})['most_hazardous_CO_level']
# Joins the series together in a dataframe
hazards = pd.DataFrame(series2_5).join(series10).join(seriesCO)
# Joins the resampled dataframes together
df = weather_day_df.join(air_day_df).join(hazards).join(sound_day_df).join(flood_day_df)
# Rounds numbers in specific columns
df = df.round({'celsius': 2, 'farenheit': 2, 'humidity': 2, 'dewpoint_celsius': 2, 'dewpoint_farenheit': 2,
'pressure': 2, 'NoiseLevel_db': 2, 'sensor_to_water_feet': 2, 'sensor_to_water_meters': 2,
'sensor_to_ground_feet': 2, 'sensor_to_ground_meters': 2, 'flood_depth_feet': 2,
'flood_depth_meters': 2})
# Create AQI for CO
df['AQI_CO'] = pd.cut(df.CO,
bins = [-1,4.5,9.5,12.5,15.5,30.5,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
# create AQi for pm 2.5
df['AQI_pm2_5'] = pd.cut(df.Pm2_5,
bins = [-1,12.1,35.5,55.5,150.5,250.5,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
# create AQI for pm 10
df['AQI_pm10'] = pd.cut(df.Pm10,
bins = [-1,55,154,255,355,425,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
return df
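# Hedged illustration (hypothetical values): the per-day "most hazardous"
# columns above work because pd.cut returns an *ordered* categorical, so
# .max() after the daily resample picks the worst category of the day.
_levels = pd.cut(pd.Series([10.0, 60.0, 300.0]),
                 bins=[-1, 55, 154, 255, 355, 425, 4000],
                 labels=['Good', 'Moderate', 'Unhealthy for Sensitive Groups',
                         'Unhealthy', 'Very Unhealthy', 'Hazardous'])
_levels.max()   # -> 'Unhealthy' (300.0 falls in the 255-355 bin)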
#-----------------------------------------------------------------------------
def full_daily_medcenter_COSA_dataframe():
'''
This function takes in all COSA dataframes,
averages them by day, then joins them all together
using the day as a primary key
'''
# Pulls sound CSV and sets datetime as index, then orders it
df = pd.read_csv('med_center_sound.csv')
sound_df = wrangle_sound(df)
sound_df = sound_df.set_index('DateTime')
sound_df = sound_df.sort_index()
# Pulls flood CSV and sets datetime as index
flood = pd.read_csv('med_center_flood.csv')
flood_df = clean_flood(flood)
flood_df = flood_df.set_index('datetime')
# Pulls weather CSV
weather = pd.read_csv('med_center_weather.csv')
weather_df = wrangle_weather(weather)
# Pulls air CSV, sets datetime column to datetime object, sets it as an index, then sorts it
air = pd.read_csv('med_center_air.csv')
air_df = clean_air(air)
air_df.datetime = pd.to_datetime(air_df.datetime)
air_df = air_df.set_index('datetime')
air_df = air_df.sort_index()
# Resamples each dataframe by the day using mean, and drops unnecessary columns from air_df
weather_day_df = weather_df.resample('D', on='datetime').mean()
flood_day_df = flood_df.resample('D').mean()
sound_day_df = sound_df.resample('D').mean()
air_day_df = air_df.resample('D').mean().drop(columns = ['hour', 'weekday', 'CO_24hr', 'Pm_25_24hr', 'Pm_10_24hr', 'SO2', 'O3', 'NO2'])
# Creating series for each pollutant
air2_5 = air_df.drop(air_df.columns.difference(['Pm2_5', 'AQI_pm2_5']), 1)
air10 = air_df.drop(air_df.columns.difference(['Pm10', 'AQI_pm10']), 1)
airCO = air_df.drop(air_df.columns.difference(['CO', 'AQI_CO']), 1)
# Pull most hazardous levels of pollution for each day
series2_5 = air2_5.resample('D').max().rename(columns = {'AQI_pm2_5': 'most_hazardous_pm2.5_level'})['most_hazardous_pm2.5_level']
series10 = air10.resample('D').max().rename(columns = {'AQI_pm10': 'most_hazardous_pm10_level'})['most_hazardous_pm10_level']
seriesCO = airCO.resample('D').max().rename(columns = {'AQI_CO': 'most_hazardous_CO_level'})['most_hazardous_CO_level']
# Joins the series together in a dataframe
hazards = pd.DataFrame(series2_5).join(series10).join(seriesCO)
# Joins the resampled dataframes together
df = weather_day_df.join(air_day_df).join(hazards).join(sound_day_df).join(flood_day_df)
# Rounds numbers in specific columns
df = df.round({'celsius': 2, 'farenheit': 2, 'humidity': 2, 'dewpoint_celsius': 2, 'dewpoint_farenheit': 2,
'pressure': 2, 'NoiseLevel_db': 2, 'sensor_to_water_feet': 2, 'sensor_to_water_meters': 2,
'sensor_to_ground_feet': 2, 'sensor_to_ground_meters': 2, 'flood_depth_feet': 2,
'flood_depth_meters': 2})
# Create AQI for CO
df['AQI_CO'] = pd.cut(df.CO,
bins = [-1,4.5,9.5,12.5,15.5,30.5,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
# create AQi for pm 2.5
df['AQI_pm2_5'] = pd.cut(df.Pm2_5,
bins = [-1,12.1,35.5,55.5,150.5,250.5,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
# create AQI for pm 10
df['AQI_pm10'] = pd.cut(df.Pm10,
bins = [-1,55,154,255,355,425,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
return df
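# Hedged sketch (refactor suggestion, not in the original): this builder, the
# downtown one above and the brooks one below differ only in the CSV prefix.
# A small loader like this could remove the duplicated read/clean/index code,
# assuming the '<area>_sound.csv' / '<area>_flood.csv' / '<area>_weather.csv' /
# '<area>_air.csv' naming convention holds; the daily resampling and joins
# would then follow exactly as above.
def load_cosa_area(area):
    sound_df = wrangle_sound(pd.read_csv(f'{area}_sound.csv')).set_index('DateTime').sort_index()
    flood_df = clean_flood(pd.read_csv(f'{area}_flood.csv')).set_index('datetime')
    weather_df = wrangle_weather(pd.read_csv(f'{area}_weather.csv'))
    air_df = clean_air(pd.read_csv(f'{area}_air.csv')).set_index('datetime').sort_index()
    return sound_df, flood_df, weather_df, air_df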
#-----------------------------------------------------------------------------
def full_daily_brooks_COSA_dataframe():
'''
This function takes in all COSA dataframes,
averages them by day, then joins them all together
using the day as a primary key
'''
# Pulls sound CSV and sets datetime as index, then orders it
df =
|
pd.read_csv('brooks_sound.csv')
|
pandas.read_csv
|
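# Hedged note (illustration only, same CSV assumed): parse_dates can do the
# DateTime conversion at load time instead of the later pd.to_datetime call.
brooks_sound_raw = pd.read_csv('brooks_sound.csv', parse_dates=['DateTime'])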
from typing import Dict, List
import pandas as pd
import pytest
from ruamel.yaml import YAML
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import (
Batch,
BatchDefinition,
BatchRequest,
PartitionDefinition,
)
from great_expectations.data_context.util import instantiate_class_from_config
from great_expectations.datasource.new_datasource import Datasource
yaml = YAML()
@pytest.fixture
def basic_datasource_with_runtime_data_connector():
basic_datasource: Datasource = instantiate_class_from_config(
yaml.load(
f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
test_runtime_data_connector:
module_name: great_expectations.datasource.data_connector
class_name: RuntimeDataConnector
runtime_keys:
- pipeline_stage_name
- airflow_run_id
- custom_key_0
""",
),
runtime_environment={"name": "my_datasource"},
config_defaults={"module_name": "great_expectations.datasource"},
)
return basic_datasource
def test_basic_datasource_runtime_data_connector_self_check(
basic_datasource_with_runtime_data_connector,
):
report = basic_datasource_with_runtime_data_connector.self_check()
assert report == {
"execution_engine": {
"caching": True,
"module_name": "great_expectations.execution_engine.pandas_execution_engine",
"class_name": "PandasExecutionEngine",
"discard_subset_failing_expectations": False,
"boto3_options": {},
},
"data_connectors": {
"count": 1,
"test_runtime_data_connector": {
"class_name": "RuntimeDataConnector",
"data_asset_count": 0,
"example_data_asset_names": [],
"data_assets": {},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
},
},
}
def test_basic_datasource_runtime_data_connector_error_checking(
basic_datasource_with_runtime_data_connector,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
# Test for an unknown datasource
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="non_existent_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
)
)
# Test for an unknown data_connector
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=BatchRequest(
datasource_name=basic_datasource_with_runtime_data_connector.name,
data_connector_name="non_existent_data_connector",
data_asset_name="my_data_asset",
)
)
# Test for illegal absence of partition_request when batch_data is specified
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=BatchRequest(
datasource_name=basic_datasource_with_runtime_data_connector.name,
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
batch_data=test_df,
partition_request=None,
)
)
# Test for illegal nullity of partition_request["partition_identifiers"] when batch_data is specified
partition_request: dict = {"partition_identifiers": None}
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=BatchRequest(
datasource_name=basic_datasource_with_runtime_data_connector.name,
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
batch_data=test_df,
partition_request=partition_request,
)
)
# Test for illegal falsiness of partition_request["partition_identifiers"] when batch_data is specified
partition_request: dict = {"partition_identifiers": {}}
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=BatchRequest(
datasource_name=basic_datasource_with_runtime_data_connector.name,
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
batch_data=test_df,
partition_request=partition_request,
)
)
def test_partition_request_and_runtime_keys_success_all_keys_present(
basic_datasource_with_runtime_data_connector,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
partition_request: dict
partition_request = {
"batch_identifiers": {
"pipeline_stage_name": "core_processing",
"airflow_run_id": 1234567890,
"custom_key_0": "custom_value_0",
}
}
# Verify that all keys in partition_request are acceptable as runtime_keys (using batch count).
batch_request: dict = {
"datasource_name": basic_datasource_with_runtime_data_connector.name,
"data_connector_name": "test_runtime_data_connector",
"data_asset_name": "IN_MEMORY_DATA_ASSET",
"batch_data": test_df,
"partition_request": partition_request,
"limit": None,
}
batch_request: BatchRequest = BatchRequest(**batch_request)
batch_list: List[
Batch
] = basic_datasource_with_runtime_data_connector.get_batch_list_from_batch_request(
batch_request=batch_request
)
assert len(batch_list) == 1
def test_partition_request_and_runtime_keys_error_illegal_keys(
basic_datasource_with_runtime_data_connector,
):
test_df: pd.DataFrame =
|
pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
|
pandas.DataFrame
|
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
assert result == expected
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [
|
pd.offsets.Hour(2)
|
pandas.offsets.Hour
|
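# Hedged illustration (not part of the test above): adding a DateOffset such
# as pd.offsets.Hour(2) shifts every element of a DatetimeIndex, which is the
# behaviour the add/iadd test is exercising.
_rng = pd.date_range('2000-01-01', periods=3, freq='D')
_rng + pd.offsets.Hour(2)
# -> DatetimeIndex(['2000-01-01 02:00:00', '2000-01-02 02:00:00',
#                   '2000-01-03 02:00:00'], ...)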
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
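# Shared size fixtures for the from_orders tests: order_size alternates +inf and
# -inf orders (with a NaN bar in between), order_size_one trades a single unit,
# and the *_wide variant tiles the series across three columns. The helpers below
# wrap Portfolio.from_orders with a fixed direction ('both', 'longonly', 'shortonly').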
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
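# Tests for Portfolio.from_orders, run through the direction-specific wrappers above.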
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
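    # val_price=np.inf should behave like valuing at the current close, and
    # -np.inf like the previous close (forward-filled unless ffill_val_price=False).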
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
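    # lock_cash=True is expected to keep free cash from going negative, i.e. short
    # proceeds cannot be spent beyond what the group can cover; the lock_cash=False
    # runs below show negative free cash for comparison.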
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
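        # Same flag via the direction wrappers: column 0 runs with lock_cash=False,
        # column 1 with lock_cash=True, so only column 0 can drive free cash negative.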
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
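    # allow_partial=False should reject orders that cannot be filled in full
    # instead of filling whatever cash or shares allow.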
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
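    # log=True records the full order context for every bar, including bars where
    # the order is ignored or rejected (see the last two records below).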
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
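    # group_by groups columns into portfolios without cash sharing: orders stay
    # per-column and init_cash is reported per group.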
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
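    # call_seq controls the processing order of columns within a cash-sharing group;
    # 'auto' is expected to rearrange each row so that cash-freeing (sell) orders
    # execute before buys.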
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
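    # size_type='value' interprets size as an order value in cash, so the filled
    # amount shrinks as the valuation price rises.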
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
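    # size_type='targetamount' trades toward an absolute target position, so only
    # the first bar produces an order while the target is already met afterwards.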
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
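    # update_value=True re-values the group after every filled order; this only
    # matters under cash sharing (the single-column comparison below is identical
    # either way, while the grouped, cash-shared runs differ).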
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
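    # With call_seq='auto' and cash sharing, the rebalancing targets should be hit
    # exactly on every bar, whether given as target values or target percentages.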
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
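# Shared signal fixtures: entries/exits overlap on the middle bar, and the *_wide
# variants tile them across three columns. The from_signals_* helpers fix the
# direction argument, while the from_ls_signals_* helpers express the same setups
# through separate long and short entry/exit arrays.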
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
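    # A custom signal_func_nb can emit the four signal flags per element; the
    # portfolio built from it should match the baseline built from explicit
    # entry/exit/short-entry/short-exit arrays.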
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
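    # size_type='percent' with two-directional signals raises here by default but
    # is accepted once upon_opposite_entry='close', i.e. the opposite entry first
    # closes the open position.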
def test_percent(self):
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
|
pd.Int64Index([0, 0, 1], dtype='int64')
|
pandas.Int64Index
|
"""
Copyright 2021 Novartis Institutes for BioMedical Research Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import janitor
import janitor.chemistry
# nn stuff
import torch
# chem stuff
import rdkit.Chem as Chem
from rdkit import DataStructs, RDLogger
from rdkit.Chem import AllChem
from torch.utils import data
lg = RDLogger.logger()
lg.setLevel(RDLogger.CRITICAL)
def normalize_morgans(morgans):
anscombe = np.sqrt(morgans + (3.0 / 8.0)) * 2
max_count = 30
max_count = np.sqrt(max_count + (3.0 / 8.0)) * 2
normalized = anscombe / max_count
return normalized
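# A rough worked example of the transform above (values approximate, shown only to
# illustrate the scaling; the Anscombe-style sqrt(x + 3/8) * 2 step assumes a
# maximum bit count of 30):
#   normalize_morgans(np.array([0, 1, 30]))  ->  approx. array([0.111, 0.213, 1.0])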
class MorgansDataset(data.Dataset):
def __init__(self, morgans: pd.DataFrame, targets: pd.DataFrame):
"""
Create a Morgans dataset for use with PyTorch.
targets and morgans must be pandas DataFrames respectively
and they must be indexed by structure IDs.
Assumes that the targets, and morgans are indexed identically.
"""
# assert len(targets) == len(
# morgans
# ), "morgans and targets must be of the same length."
# assert ((targets.index == morgans.index).all());
self.targets = targets
self.morgans = morgans
self.list_IDs = morgans.index
    # For inference the dataset may be built without targets; __getitem__ then returns a dummy y of -1.
def __len__(self):
"""Return the total number of samples"""
return len(self.list_IDs)
def __getitem__(self, index):
"""Generates one sample of data
The index passed by the torch generator is an integer
which we have to remap to our own internal index ...
"""
# Load data and get target
structure_id = self.list_IDs[index]
normalized = normalize_morgans(self.morgans.loc[structure_id].values)
X = torch.from_numpy(normalized).float()
if self.targets is not None:
y = torch.from_numpy(np.array(self.targets.loc[structure_id])).float()
else:
y = torch.tensor(-1).float() # return a dummy variable
return X, y
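# A minimal usage sketch for the dataset above (names like `morgans_df` and
# `targets_df` are illustrative placeholders, not part of this module):
#   dataset = MorgansDataset(morgans_df, targets_df)
#   loader = data.DataLoader(dataset, batch_size=32, shuffle=True)
#   for X, y in loader:
#       ...  # X: normalized fingerprint tensor, y: target value (or -1 dummy)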
def preprocess_redux(assay_data,
binary_fp=False,
filter_mols=True,
n_atoms_filter=50,
convert_to_pac50=False,
                     drop_qualified=True,
                     convert_fps_to_numpy=False):
toxdata = assay_data
if drop_qualified:
toxdata = toxdata.dropnotnull("qualifier")
toxdata = toxdata.transform_column("val", np.log10)
toxdata = toxdata.remove_columns(["qualifier"])
toxdata = toxdata.replace([np.inf, -np.inf], np.nan).dropna(subset=["val"])
if convert_to_pac50:
toxdata["val"] = (toxdata["val"] - 6) * -1
n_dropped = len(assay_data) - len(toxdata)
print(n_dropped)
morgans = {}
mols = {}
for idx in toxdata.index:
smiles = toxdata.smiles.loc[idx]
try:
mol = Chem.MolFromSmiles(smiles)
        except Exception:
print(smiles)
print("[ERROR] Could not create mol")
continue
try:
if binary_fp:
                # GetMorganFingerprint returns an RDKit sparse count vector;
                # some downstream code needs a dense numpy bit vector instead,
                # which the optional conversion below provides.
                fp_array = AllChem.GetMorganFingerprint(mol, 2)
if convert_fps_to_numpy:
                    fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=1024)
fp_array = np.zeros((0,), dtype=np.int8)
DataStructs.ConvertToNumpyArray(fp, fp_array)
else:
fp = AllChem.GetHashedMorganFingerprint(
mol, radius=3, nBits=2048, useChirality=True
)
#fp = AllChem.GetHashedMorganFingerprint(
# mol, radius=2, nBits=1024, useChirality=True
#)
fp_array = np.zeros((0,), dtype=np.int8)
DataStructs.ConvertToNumpyArray(fp, fp_array)
morgans[idx] = fp_array
mols[idx] = mol # don't add the molecule if the FP cannot be computed
        except Exception:
print("[ERROR] Could not create fingerprint")
continue
morgans_df =
|
pd.DataFrame.from_dict(morgans, orient="index")
|
pandas.DataFrame.from_dict
|
import xml.etree.ElementTree as ET
import warnings
import pandas as pd
import sys, os
import configparser
import numpy as np
import glob
from ..utils import XmlDictConfig, XmlListConfig
pd.set_option('mode.chained_assignment', None)
MAPPING_EVENT = {
'0':'Unknown',
'1': 'central_apnea',
'2': 'obstructive_apnea',
'3': 'mixed_apnea',
'4': 'desat',
'5': 'respiratory_artefact',
'6': 'spo2_artefact',
'7': 'arousal_t1',
'8': 'arousal_t2',
'9': 'arousal_t3',
'10': 'arousal_t4',
'11': 'arousal_t5',
'12': 'limb_left',
'13':'limb_right',
'14':'bradycardia',
'15':'tachycardia',
'16': 'tco2_artifact',
'17': 'etco2_artifact',
'18': 'distal_ph_artifact',
'19': 'distal_ph_event',
'20': 'proximal_ph_artifact',
'21': 'proximal_ph_event',
'22': 'blood_pressure_artifact',
'23': 'body_temp_artifact',
'24': 'unsure_resp_event',
'25': 'resp_paradox',
'26': 'periodic_breathing',
'27': 'PLM_episode',
'28': 'heart_rate_artifact',
'29': 'obstructive_hypopnea',
'30': 'central_hypopnea',
'31': 'mixed_hypopnea',
'32': 'RERA',
'33': 'snore_event',
'34': 'user_event_1',
'35': 'user_event_2',
'36': 'user_event_3',
'37': 'user_event_4',
'38': 'user_event_5',
'39': 'user_resp_event_1',
'40': 'user_resp_event_2',
'41': 'user_resp_event_3',
'42': 'user_resp_event_4',
'43': 'user_resp_event_5',
'44': 'delta_wave',
'45': 'spindles',
'46': 'left_eye_movement',
'47': 'left_eye_movement_anti_phase',
'48': 'left_eye_movement_phase',
'49': 'right_eye_movement',
'50': 'right_eye_movement_anti_phase',
'51': 'right_eye_movement_phase',
'52': 'PTT_event',
'53': 'PTT_artifact',
'54': 'asystole',
'55': 'wide_complex_tachycardia',
'56': 'narrow_complex_tachycardia',
'57': 'atrial_fibrilation',
'58': 'bruxism',
'59': 'SMA',
'60': 'TMA',
'61': 'rythmic_movement',
'62': 'ECG_artifact',
'63': 'CAP_A1',
'64': 'CAP_A2',
'65': 'CAP_A3',
'66': 'PES_artifact',
'67': 'CPAP_artifact',
'68': 'user_event_6',
'69': 'user_event_7',
'70': 'user_event_8',
'71': 'user_event_9',
'72': 'user_event_10',
'73': 'user_event_11',
'74': 'user_event_12',
'75': 'user_event_13',
'76': 'user_event_14',
'77': 'user_event_15',
'78':'transient_muscle_activity',
'79':'hypnagogic_foot_tremor',
'80': 'hypnagogic_foot_tremor_burst_left',
'81': 'hypnagogic_foot_tremor_burst_right',
'82': 'excessive_fragmentary_myolonus',
'83': 'alternating_leg_muscle_activation',
'84': 'rythmic_movement_burst',
'85': 'hyperventilation',
'86': 'excessive_fragment_myolonus_burst_left',
'87': 'excessive_fragment_myolonus_burst_right',
'88': 'hypoventilation',
}
def cpm_list_event():
event_list = [val for _,val in MAPPING_EVENT.items()]
return event_list
def _read_epoch_data(folder):
"""
Read PROCESS.ADV file in the compumedics folder.
Compumedics automatically run some epochs level analysis when recording (or
maybe when closing file?) such as spindles detection etc.. which is then
saved in Process.ADV file.
Returns
-------
epochdata : pd.DataFrame
30-seconds epoch-level of summary data (e.g. heart rate)
Notes
------
Some parameters (e.g. U15) have not yet been figured out.
"""
process_file = os.path.join(folder, 'PROCESS.ADV')
n = np.fromfile(process_file, dtype=np.int16)
number_of_epochs = n[0]
other_data = np.reshape(n[1:], (number_of_epochs, -1))
columns_names = ['Artifact', 'DeltaL', 'DeltaM', 'DeltaH', 'ThetaD',
'ThetaA',
'Alpha', 'Sigma', 'Beta', 'U10', 'Spindles', 'MeanSAO2',
'MinSAO2',
'MaxSAO2', 'U15', 'U16', 'Sound', 'REM', 'EMGamp', 'U20',
'CPAP',
'U22', 'HR', 'U24', 'U25', 'Posture', 'U27', 'U28', 'U29',
'U30',
'KC', 'U32', 'U33']
epochdata =
|
pd.DataFrame(data=other_data, columns=columns_names)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @author : microfat
# @time : 08/05/20 22:13:22
# @File : parseData.py
import re
import unicodedata
import pandas as pd
from bs4 import BeautifulSoup
from lxml import etree
from urllib.parse import urlparse
from gne import GeneralNewsExtractor
from pyhanlp import HanLP
extractor = GeneralNewsExtractor()
class ParseData:
def __init__(self):
pass
def get_city_code(self, source):
soup = BeautifulSoup(source, 'lxml')
city_dict = {}
for group in soup.find('div', {'id':'work_position_click_center_right'})\
.find_all('div', {'class':'work_position_click_center_right_list de d3'})[3:]:
group_dict = {}
for city_source in group.find_all('em'):
city_name = city_source.text
city_code = city_source['data-value']
group_dict[city_code] = city_name
city_dict = {**city_dict, **group_dict}
return city_dict
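    # Rough shape of the markup get_city_code expects, reconstructed from the
    # selectors above purely for illustration (the real site's markup may differ in detail):
    #   <div id="work_position_click_center_right">
    #     <div class="work_position_click_center_right_list de d3">
    #       <em data-value="040000">city name</em> ...
    #     </div>
    #   </div>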
def get_indtype_code(self, source):
soup = BeautifulSoup(source, 'lxml')
indtype_dict = {}
indtype_key_dict = {}
indtype_value_dict = {}
for key_source in soup.find('ul', {'id':'indtype_click_center_left'})\
.find_all('li'):
indtype_key_key = key_source['data-value']
indtype_key_value = key_source.text
indtype_key_dict[indtype_key_key] = indtype_key_value
for group in soup.find('div', {'id':'indtype_click_center_right'})\
.find_all('div', {'class':'indtype_click_center_right_list de d3'}):
group_list = []
for indtype_source in group.find_all('em'):
indtype_key = indtype_source['data-navigation']
indtype_value = indtype_source['data-value']
indtype_value_value = indtype_source.text
group_list.append(indtype_value)
indtype_value_key = indtype_value
indtype_value_dict[indtype_value_key] = indtype_value_value
indtype_dict[indtype_key] = group_list
return indtype_dict, indtype_key_dict, indtype_value_dict
def get_province_code(self, source):
soup = BeautifulSoup(source, 'lxml')
province_dict = {}
for province_source in soup.find('div', {'id':'work_position_click_center_right'})\
.find('div', {'id':'work_position_click_center_right_list_030000'})\
.find_all('em'):
province_name = province_source.text
province_code = province_source['data-value']
province_dict[province_code] = province_name
return province_dict
def get_page_num(self, source):
page_num = int(source.json()['total_page'])
return page_num
def get_job_num(self, source):
job_num = len(source.json()['engine_search_result'])
return job_num
def get_job_info(self, source):
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""Perform classical functional region delimitation."""
import logging
from typing import Any, List, Tuple, Optional, TypeVar
import numpy as np
import pandas as pd
class EvaluatorInterface:
"""Objects that evaluate fitness of regions."""
def feed(self, interactions: pd.Series, unit_props: pd.DataFrame) -> None:
raise NotImplementedError
def eval_all(self, regions: pd.Series, cores: pd.Series) -> pd.DataFrame:
raise NotImplementedError
def get_required_properties(self) -> List[str]:
raise NotImplementedError
def get_criteria(self) -> List[str]:
raise NotImplementedError
class PropertylessEvaluator(EvaluatorInterface):
name: str = NotImplemented
def feed(self, interactions: pd.Series, unit_props: pd.DataFrame) -> None:
pass
def eval_all(self, regions: pd.Series, cores: pd.Series) -> pd.DataFrame:
return pd.DataFrame(self._compute(regions, cores).rename(self.name))
def get_required_properties(self) -> List[str]:
return []
def get_criteria(self) -> List[str]:
return [self.name]
def _compute(self, regions: pd.Series, cores: pd.Series) -> pd.Series:
raise NotImplementedError
class ConstantEvaluator(PropertylessEvaluator):
"""Evaluate all regions as 1."""
name = 'constant'
def _compute(self, regions: pd.Series, cores: pd.Series) -> pd.Series:
return pd.Series(1, index=regions.unique())
class UnitCountEvaluator(PropertylessEvaluator):
"""Evaluate regions by count of units."""
name = 'unit_count'
def _compute(self, regions: pd.Series, cores: pd.Series) -> pd.Series:
return regions.value_counts(sort=False)
class SourceFlowSumEvaluator(PropertylessEvaluator):
"""Evaluate regions by summing the magnitude of outgoing interactions."""
name = 'sourceflow_sum'
flowsums: pd.Series
def feed(self, interactions: pd.Series, unit_props: pd.DataFrame) -> None:
self.flowsums = interactions.groupby(level=0).sum()
def _compute(self, regions: pd.Series, cores: pd.Series) -> pd.Series:
return self.flowsums.groupby(regions).sum()
class PropertySumEvaluator(EvaluatorInterface):
"""Evaluate regions by summing a property of their constituent units."""
prop: pd.Series
def __init__(self, criterion):
self.criterion = criterion
def feed(self, interactions: pd.Series, unit_props: pd.DataFrame) -> None:
try:
self.prop = unit_props[self.criterion]
except KeyError as err:
raise LookupError(f'{self.criterion} unit property not specified') from err
def eval_all(self, regions: pd.Series, cores: pd.Series) -> pd.DataFrame:
return pd.DataFrame(self.prop.groupby(regions).sum().rename(self.criterion))
def get_required_properties(self) -> List[str]:
return [self.criterion]
def get_criteria(self) -> List[str]:
return [self.criterion]
class HinterlandSumEvaluator(PropertySumEvaluator):
"""Evaluate regions by summing a property of their non-core units."""
PREFIX = 'hinterland_'
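    # Hinterland total = regional sum of the property minus the contribution of
    # core units (`cores` is assumed to be a boolean Series aligned with the units).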
def eval_all(self, regions: pd.Series, cores: pd.Series) -> pd.DataFrame:
return pd.DataFrame(
self.prop.groupby(regions).sum().sub(
self.prop[cores.index][cores].groupby(regions).sum(),
fill_value=0
).astype(self.prop.dtype).rename(self.PREFIX + self.criterion)
)
class CompoundEvaluator(EvaluatorInterface):
"""Evaluate regions by multiple values or criteria."""
def __init__(self, subevals: List[EvaluatorInterface]):
self.subevals = subevals
def feed(self, interactions: pd.Series, unit_props: pd.DataFrame) -> None:
for subeval in self.subevals:
subeval.feed(interactions, unit_props)
def eval_all(self, regions: pd.Series, cores: pd.Series) -> pd.DataFrame:
evaluations = self.subevals[0].eval_all(regions, cores)
for subeval in self.subevals[1:]:
subevaluation = subeval.eval_all(regions, cores)
for col in subevaluation.columns:
evaluations[col] = subevaluation[col]
return evaluations
def get_required_properties(self) -> List[str]:
return list(set(
prop
for subeval in self.subevals
for prop in subeval.get_required_properties()
))
def get_criteria(self) -> List[str]:
crits = []
for subeval in self.subevals:
for crit in subeval.get_criteria():
if crit not in crits:
crits.append(crit)
return crits
PROPERTYLESS_EVALUATORS = {
c.name: c for c in PropertylessEvaluator.__subclasses__()
}
def create_evaluator(criterion: List[str] = []) -> EvaluatorInterface:
if not criterion:
return ConstantEvaluator()
elif len(criterion) > 1:
return CompoundEvaluator([create_evaluator([critname]) for critname in criterion])
else:
criterion = criterion[0]
if criterion in PROPERTYLESS_EVALUATORS:
return PROPERTYLESS_EVALUATORS[criterion]()
elif criterion.startswith(HinterlandSumEvaluator.PREFIX):
return HinterlandSumEvaluator(criterion[len(HinterlandSumEvaluator.PREFIX):])
else:
return PropertySumEvaluator(criterion)
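# A minimal usage sketch for the factory above (criterion names are illustrative;
# 'hinterland_population' assumes a 'population' column in unit_props):
#   evaluator = create_evaluator(['unit_count', 'hinterland_population'])
#   evaluator.feed(interactions, unit_props)          # pd.Series, pd.DataFrame
#   evaluation = evaluator.eval_all(regions, cores)   # one column per criterion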
class VerifierInterface:
"""Objects that verify that the evaluation of a region is good enough."""
def verify(self, value: Any) -> bool:
raise NotImplementedError
class YesmanVerifier(VerifierInterface):
"""Always make all regions pass the criterion."""
@staticmethod
def verify(value: Any) -> bool:
return True
class MinimumVerifier(VerifierInterface):
"""Only allow those regions with an evaluation at least the given value."""
def __init__(self, threshold: float):
self.threshold = threshold
def verify(self, value: float) -> bool:
return value >= self.threshold
class CompoundAndVerifier(VerifierInterface):
"""Allow only those regions that satisfy all partial verifications."""
def __init__(self, partials: List[VerifierInterface]):
self.partials = partials
def verify(self, *args) -> bool:
return all(
partial.verify(value)
for partial, value in zip(self.partials, args)
)
class TargeterInterface:
pass
ID = TypeVar('ID')
class InteractionTargeter:
"""Select aggregation targets for units based on largest interaction."""
interactions: pd.Series
def __init__(self, source_core: bool = True, target_core: bool = True):
self.source_core = source_core
self.target_core = target_core
def feed(self, interactions: pd.Series, unit_props: pd.DataFrame) -> None:
self.interactions = interactions
def target(self, units: ID, regions: pd.Series, cores: pd.Series) -> ID:
"""Select a target for a single unit."""
strengths = self._get_strengths(units, regions, cores)
if strengths.empty:
return np.nan
else:
return strengths.groupby(level=1).sum().idxmax()
def targets(self, units: pd.Index, regions: pd.Series, cores: pd.Series) -> pd.Series:
"""Select targets for multiple units."""
strengths = self._get_strengths(units, regions, cores)
if strengths.empty:
return
|
pd.Series(np.nan, index=units)
|
pandas.Series
|
#
# This script takes all the model outputs and builds a dataframe with the final
# abundance of every species for every set of physical parameters.
# More convenient to work with than the individual final-abundance files.
#
from numpy.core.fromnumeric import compress
import pandas as pd
from glob import glob
from uclchem import read_output_file,check_abunds
from multiprocessing import Pool
from os import remove
def read_last_abunds(data_file):
    # Keep only models that ran to roughly 1e6 years; otherwise return an
    # empty frame so the model is dropped when the results are concatenated.
    a=read_output_file(data_file)
    if (a["Time"].max()>9.99e5):
        # Last row, species columns only (columns 5:-2), reshaped to long format.
        a=a.iloc[-1,5:-2].reset_index()
        a.columns=["Species","Abundance"]
        a["outputFile"]=data_file
        a["Conserve C"]=check_conserve(data_file)
    else:
        a=pd.DataFrame()
    return a
def check_conserve(data_file):
a=read_output_file(data_file)
result=check_abunds("C",a)
result=(result.iloc[0]-result.iloc[-1])/result.iloc[0]
return abs(result)<0.01
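# check_conserve passes when total elemental carbon drifts by less than 1%
# between the first and last output; e.g. a (hypothetical) drift from 2.6e-4
# to 2.55e-4 is a ~1.9% loss and would fail the check.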
def remover(file_name):
try:
remove(file_name)
    except Exception:
pass
phase="three_phase"
models=pd.read_csv(f"data/{phase}/models.csv")
# Not all runs completed; keep only models whose output file actually exists.
models=models[models["outputFile"].isin(glob(f"data/{phase}/models/*"))]
with Pool(63) as pool:
a=pool.map(read_last_abunds,models["outputFile"].values)
a=pd.concat(list(a))
models=models.merge(a,on="outputFile")
models=models.dropna()
models.to_hdf(f"data/{phase}/final_abunds.hdf",key="df",mode="w")
completed=models["outputFile"].unique()
bad_c=list(models.loc[models["Conserve C"]==False,"outputFile"].unique())
models=
|
pd.read_csv(f"data/{phase}/models.csv")
|
pandas.read_csv
|
"""
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from pandas._libs.tslibs import timezones
from pandas._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import pandas._testing as tm
from pandas.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
# pertinent cases.
def test_date_range_timestamp_equiv(self):
rng = date_range("20090415", "20090519", tz="US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_dateutil(self):
rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_explicit_pytz(self):
rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
assert ts == stamp
@td.skip_if_windows_python_3
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=gettz("US/Eastern"), freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_from_datetime_instance(self):
datetime_instance = datetime(2014, 3, 4)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
ts = Timestamp(datetime_instance, freq="D")
assert ts == timestamp_instance
def test_date_range_timestamp_equiv_preserve_frequency(self):
timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
ts = Timestamp("2014-03-05", freq="D")
assert timestamp_instance == ts
class TestDateRanges:
def test_date_range_nat(self):
# GH#11587
msg = "Neither `start` nor `end` can be NaT"
with pytest.raises(ValueError, match=msg):
date_range(start="2016-01-01", end=pd.NaT, freq="D")
with pytest.raises(ValueError, match=msg):
date_range(start=pd.NaT, end="2016-01-01", freq="D")
def test_date_range_multiplication_overflow(self):
# GH#24255
# check that overflows in calculating `addend = periods * stride`
# are caught
with tm.assert_produces_warning(None):
# we should _not_ be seeing a overflow RuntimeWarning
dti = date_range(start="1677-09-22", periods=213503, freq="D")
assert dti[0] == Timestamp("1677-09-22")
assert len(dti) == 213503
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("1969-05-04", periods=200000000, freq="30000D")
def test_date_range_unsigned_overflow_handling(self):
# GH#24255
# case where `addend = periods * stride` overflows int64 bounds
# but not uint64 bounds
dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")
dti2 = date_range(start=dti[0], periods=len(dti), freq="D")
assert dti2.equals(dti)
dti3 = date_range(end=dti[-1], periods=len(dti), freq="D")
assert dti3.equals(dti)
def test_date_range_int64_overflow_non_recoverable(self):
# GH#24255
# case with start later than 1970-01-01, overflow int64 but not uint64
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(start="1970-02-01", periods=106752 * 24, freq="H")
# case with end before 1970-01-01, overflow int64 but not uint64
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1969-11-14", periods=106752 * 24, freq="H")
def test_date_range_int64_overflow_stride_endpoint_different_signs(self):
# cases where stride * periods overflow int64 and stride/endpoint
# have different signs
start = Timestamp("2262-02-23")
end = Timestamp("1969-11-14")
expected = date_range(start=start, end=end, freq="-1H")
assert expected[0] == start
assert expected[-1] == end
dti = date_range(end=end, periods=len(expected), freq="-1H")
tm.assert_index_equal(dti, expected)
start2 = Timestamp("1970-02-01")
end2 = Timestamp("1677-10-22")
expected2 = date_range(start=start2, end=end2, freq="-1H")
assert expected2[0] == start2
assert expected2[-1] == end2
dti2 = date_range(start=start2, periods=len(expected2), freq="-1H")
tm.assert_index_equal(dti2, expected2)
def test_date_range_out_of_bounds(self):
# GH#14187
msg = "Cannot generate range"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("2016-01-01", periods=100000, freq="D")
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1763-10-12", periods=100000, freq="D")
def test_date_range_gen_error(self):
rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min")
assert len(rng) == 4
@pytest.mark.parametrize("freq", ["AS", "YS"])
def test_begin_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
freq=freq,
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["A", "Y"])
def test_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["BA", "BY"])
def test_business_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq
)
tm.assert_index_equal(rng, exp)
def test_date_range_negative_freq(self):
# GH 11018
rng = date_range("2011-12-31", freq="-2A", periods=3)
exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2A"
rng = date_range("2011-01-31", freq="-2M", periods=3)
exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2M"
def test_date_range_bms_bug(self):
# #1645
rng = date_range("1/1/2000", periods=10, freq="BMS")
ex_first = Timestamp("2000-01-03")
assert rng[0] == ex_first
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq="2D")
offset = timedelta(2)
values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset)
tm.assert_index_equal(rng, values)
rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B")
the_time = time(8, 15)
for val in rng:
assert val.time() == the_time
def test_date_range_fy5252(self):
dr = date_range(
start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"),
)
assert dr[0] == Timestamp("2013-01-31")
assert dr[1] == Timestamp("2014-01-30")
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start, end, periods=10, freq="s")
def test_date_range_convenience_periods(self):
# GH 20808
result = date_range("2018-04-24", "2018-04-27", periods=3)
expected = DatetimeIndex(
["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"],
freq=None,
)
tm.assert_index_equal(result, expected)
# Test if spacing remains linear if tz changes to dst in range
result = date_range(
"2018-04-01 01:00:00",
"2018-04-01 04:00:00",
tz="Australia/Sydney",
periods=3,
)
expected = DatetimeIndex(
[
Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"),
Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"),
Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"),
]
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start,end,result_tz",
[
["20180101", "20180103", "US/Eastern"],
[datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"],
[Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
"US/Eastern",
],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
None,
],
],
)
def test_date_range_linspacing_tz(self, start, end, result_tz):
# GH 20983
result = date_range(start, end, periods=3, tz=result_tz)
expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected)
def test_date_range_businesshour(self):
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(["2014-07-04 16:00", "2014-07-07 09:00"], freq="BH")
rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
"2014-07-07 09:00",
"2014-07-07 10:00",
"2014-07-07 11:00",
"2014-07-07 12:00",
"2014-07-07 13:00",
"2014-07-07 14:00",
"2014-07-07 15:00",
"2014-07-07 16:00",
"2014-07-08 09:00",
"2014-07-08 10:00",
"2014-07-08 11:00",
"2014-07-08 12:00",
"2014-07-08 13:00",
"2014-07-08 14:00",
"2014-07-08 15:00",
"2014-07-08 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
def test_range_misspecified(self):
# GH #1095
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(periods=10)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(periods=10, freq="H")
with pytest.raises(ValueError, match=msg):
date_range()
def test_compat_replace(self):
# https://github.com/statsmodels/statsmodels/issues/3349
# replace should take ints/longs for compat
result = date_range(
Timestamp("1960-04-01 00:00:00", freq="QS-JAN"), periods=76, freq="QS-JAN"
)
assert len(result) == 76
def test_catch_infinite_loop(self):
offset = offsets.DateOffset(minute=5)
# blow up, don't loop forever
msg = "Offset <DateOffset: minute=5> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset)
@pytest.mark.parametrize("periods", (1, 2))
def test_wom_len(self, periods):
# https://github.com/pandas-dev/pandas/issues/20517
res = date_range(start="20110101", periods=periods, freq="WOM-1MON")
assert len(res) == periods
def test_construct_over_dst(self):
# GH 20854
pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=True
)
pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=False
)
expect_data = [
Timestamp("2010-11-07 00:00:00", tz="US/Pacific"),
pre_dst,
pst_dst,
]
expected = DatetimeIndex(expect_data, freq="H")
result = date_range(start="2010-11-7", periods=3, freq="H", tz="US/Pacific")
tm.assert_index_equal(result, expected)
def test_construct_with_different_start_end_string_format(self):
# GH 12064
result = date_range(
"2013-01-01 00:00:00+09:00", "2013/01/01 02:00:00+09:00", freq="H"
)
expected = DatetimeIndex(
[
Timestamp("2013-01-01 00:00:00+09:00"),
Timestamp("2013-01-01 01:00:00+09:00"),
Timestamp("2013-01-01 02:00:00+09:00"),
],
freq="H",
)
tm.assert_index_equal(result, expected)
def test_error_with_zero_monthends(self):
msg = r"Offset <0 \* MonthEnds> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range("1/1/2000", "1/1/2001", freq=MonthEnd(0))
def test_range_bug(self):
# GH #770
offset = DateOffset(months=3)
result = date_range("2011-1-1", "2012-1-31", freq=offset)
start = datetime(2011, 1, 1)
expected = DatetimeIndex([start + i * offset for i in range(5)], freq=offset)
tm.assert_index_equal(result, expected)
def test_range_tz_pytz(self):
# see gh-2906
tz = timezone("US/Eastern")
start = tz.localize(datetime(2011, 1, 1))
end = tz.localize(datetime(2011, 1, 3))
dr = date_range(start=start, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
@pytest.mark.parametrize(
"start, end",
[
[
Timestamp(datetime(2014, 3, 6), tz="US/Eastern"),
Timestamp(datetime(2014, 3, 12), tz="US/Eastern"),
],
[
Timestamp(datetime(2013, 11, 1), tz="US/Eastern"),
Timestamp(datetime(2013, 11, 6), tz="US/Eastern"),
],
],
)
def test_range_tz_dst_straddle_pytz(self, start, end):
dr = date_range(start, end, freq="D")
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(start, end, freq="D", tz="US/Eastern")
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(
start.replace(tzinfo=None),
end.replace(tzinfo=None),
freq="D",
tz="US/Eastern",
)
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
def test_range_tz_dateutil(self):
# see gh-2906
# Use maybe_get_tz to fix filename in tz under dateutil.
from pandas._libs.tslibs.timezones import maybe_get_tz
tz = lambda x: maybe_get_tz("dateutil/" + x)
start = datetime(2011, 1, 1, tzinfo=tz("US/Eastern"))
end = datetime(2011, 1, 3, tzinfo=tz("US/Eastern"))
dr = date_range(start=start, periods=3)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
@pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"])
def test_range_closed(self, freq):
begin = datetime(2011, 1, 1)
end = datetime(2014, 1, 1)
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
def test_range_closed_with_tz_aware_start_end(self):
# GH12409, GH12684
begin = Timestamp("2011/1/1", tz="US/Eastern")
end = Timestamp("2014/1/1", tz="US/Eastern")
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
begin = Timestamp("2011/1/1")
end = Timestamp("2014/1/1")
begintz = Timestamp("2011/1/1", tz="US/Eastern")
endtz = Timestamp("2014/1/1", tz="US/Eastern")
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq, tz="US/Eastern")
left = date_range(begin, end, closed="left", freq=freq, tz="US/Eastern")
right = date_range(begin, end, closed="right", freq=freq, tz="US/Eastern")
expected_left = left
expected_right = right
if endtz == closed[-1]:
expected_left = closed[:-1]
if begintz == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
@pytest.mark.parametrize("closed", ["right", "left", None])
def test_range_closed_boundary(self, closed):
# GH#11804
right_boundary = date_range(
"2015-09-12", "2015-12-01", freq="QS-MAR", closed=closed
)
left_boundary = date_range(
"2015-09-01", "2015-09-12", freq="QS-MAR", closed=closed
)
both_boundary = date_range(
"2015-09-01", "2015-12-01", freq="QS-MAR", closed=closed
)
expected_right = expected_left = expected_both = both_boundary
if closed == "right":
expected_left = both_boundary[1:]
if closed == "left":
expected_right = both_boundary[:-1]
if closed is None:
expected_right = both_boundary[1:]
expected_left = both_boundary[:-1]
tm.assert_index_equal(right_boundary, expected_right)
tm.assert_index_equal(left_boundary, expected_left)
tm.assert_index_equal(both_boundary, expected_both)
def test_years_only(self):
# GH 6961
dr = date_range("2014", "2015", freq="M")
assert dr[0] == datetime(2014, 1, 31)
assert dr[-1] == datetime(2014, 12, 31)
def test_freq_divides_end_in_nanos(self):
# GH 10885
result_1 = date_range("2005-01-12 10:00", "2005-01-12 16:00", freq="345min")
result_2 = date_range("2005-01-13 10:00", "2005-01-13 16:00", freq="345min")
expected_1 = DatetimeIndex(
["2005-01-12 10:00:00", "2005-01-12 15:45:00"],
dtype="datetime64[ns]",
freq="345T",
tz=None,
)
expected_2 = DatetimeIndex(
["2005-01-13 10:00:00", "2005-01-13 15:45:00"],
dtype="datetime64[ns]",
freq="345T",
tz=None,
)
tm.assert_index_equal(result_1, expected_1)
tm.assert_index_equal(result_2, expected_2)
def test_cached_range_bug(self):
rng = date_range("2010-09-01 05:00:00", periods=50, freq=DateOffset(hours=6))
assert len(rng) == 50
assert rng[0] == datetime(2010, 9, 1, 5)
def test_timezone_comparaison_bug(self):
# smoke test
start = Timestamp("20130220 10:00", tz="US/Eastern")
result = date_range(start, periods=2, tz="US/Eastern")
assert len(result) == 2
def test_timezone_comparaison_assert(self):
start = Timestamp("20130220 10:00", tz="US/Eastern")
msg = "Inferred time zone not equal to passed time zone"
with pytest.raises(AssertionError, match=msg):
date_range(start, periods=2, tz="Europe/Berlin")
def test_negative_non_tick_frequency_descending_dates(self, tz_aware_fixture):
# GH 23270
tz = tz_aware_fixture
result = date_range(start="2011-06-01", end="2011-01-01", freq="-1MS", tz=tz)
expected = date_range(end="2011-06-01", start="2011-01-01", freq="1MS", tz=tz)[
::-1
]
tm.assert_index_equal(result, expected)
class TestDateRangeTZ:
"""Tests for date_range with timezones"""
def test_hongkong_tz_convert(self):
# GH#1673 smoke test
dr = date_range("2012-01-01", "2012-01-10", freq="D", tz="Hongkong")
# it works!
dr.hour
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_span_dst_transition(self, tzstr):
# GH#1778
# Standard -> Daylight Savings Time
dr = date_range("03/06/2012 00:00", periods=200, freq="W-FRI", tz="US/Eastern")
assert (dr.hour == 0).all()
dr = date_range("2012-11-02", periods=10, tz=tzstr)
result = dr.hour
expected = pd.Index([0] * 10)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_timezone_str_argument(self, tzstr):
tz = timezones.maybe_get_tz(tzstr)
result = date_range("1/1/2000", periods=10, tz=tzstr)
expected = date_range("1/1/2000", periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_date_range_with_fixedoffset_noname(self):
from pandas.tests.indexes.datetimes.test_timezones import fixed_off_no_name
off = fixed_off_no_name
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
idx = pd.Index([start, end])
assert off == idx.tz
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_with_tz(self, tzstr):
stamp = Timestamp("3/11/2012 05:00", tz=tzstr)
assert stamp.hour == 5
rng = date_range("3/11/2012 04:00", periods=10, freq="H", tz=tzstr)
assert stamp == rng[1]
class TestGenRangeGeneration:
def test_generate(self):
rng1 = list(generate_range(START, END, offset=BDay()))
rng2 = list(generate_range(START, END, offset="B"))
assert rng1 == rng2
def test_generate_cday(self):
rng1 = list(generate_range(START, END, offset=CDay()))
rng2 = list(generate_range(START, END, offset="C"))
assert rng1 == rng2
def test_1(self):
rng = list(generate_range(start=datetime(2009, 3, 25), periods=2))
expected = [datetime(2009, 3, 25), datetime(2009, 3, 26)]
assert rng == expected
def test_2(self):
rng = list(generate_range(start=datetime(2008, 1, 1), end=datetime(2008, 1, 3)))
expected = [datetime(2008, 1, 1), datetime(2008, 1, 2), datetime(2008, 1, 3)]
assert rng == expected
def test_3(self):
rng = list(generate_range(start=datetime(2008, 1, 5), end=datetime(2008, 1, 6)))
expected = []
assert rng == expected
def test_precision_finer_than_offset(self):
# GH#9907
result1 = date_range(
start="2015-04-15 00:00:03", end="2016-04-22 00:00:00", freq="Q"
)
result2 = date_range(
start="2015-04-15 00:00:03", end="2015-06-22 00:00:04", freq="W"
)
expected1_list = [
"2015-06-30 00:00:03",
"2015-09-30 00:00:03",
"2015-12-31 00:00:03",
"2016-03-31 00:00:03",
]
expected2_list = [
"2015-04-19 00:00:03",
"2015-04-26 00:00:03",
"2015-05-03 00:00:03",
"2015-05-10 00:00:03",
"2015-05-17 00:00:03",
"2015-05-24 00:00:03",
"2015-05-31 00:00:03",
"2015-06-07 00:00:03",
"2015-06-14 00:00:03",
"2015-06-21 00:00:03",
]
expected1 = DatetimeIndex(
expected1_list, dtype="datetime64[ns]", freq="Q-DEC", tz=None
)
expected2 = DatetimeIndex(
expected2_list, dtype="datetime64[ns]", freq="W-SUN", tz=None
)
tm.assert_index_equal(result1, expected1)
tm.assert_index_equal(result2, expected2)
dt1, dt2 = "2017-01-01", "2017-01-01"
tz1, tz2 = "US/Eastern", "Europe/London"
@pytest.mark.parametrize(
"start,end",
[
(Timestamp(dt1, tz=tz1), Timestamp(dt2)),
(Timestamp(dt1), Timestamp(dt2, tz=tz2)),
(Timestamp(dt1, tz=tz1), Timestamp(dt2, tz=tz2)),
(Timestamp(dt1, tz=tz2), Timestamp(dt2, tz=tz1)),
],
)
def test_mismatching_tz_raises_err(self, start, end):
# issue 18488
msg = "Start and end cannot both be tz-aware with different timezones"
with pytest.raises(TypeError, match=msg):
date_range(start, end)
with pytest.raises(TypeError, match=msg):
date_range(start, end, freq=BDay())
class TestBusinessDateRange:
def test_constructor(self):
bdate_range(START, END, freq=BDay())
bdate_range(START, periods=20, freq=BDay())
bdate_range(end=START, periods=20, freq=BDay())
msg = "periods must be a number, got B"
with pytest.raises(TypeError, match=msg):
date_range("2011-1-1", "2012-1-1", "B")
with pytest.raises(TypeError, match=msg):
bdate_range("2011-1-1", "2012-1-1", "B")
msg = "freq must be specified for bdate_range; use date_range instead"
with pytest.raises(TypeError, match=msg):
bdate_range(START, END, periods=10, freq=None)
def test_misc(self):
end = datetime(2009, 5, 13)
dr = bdate_range(end=end, periods=20)
firstDate = end - 19 * BDay()
assert len(dr) == 20
assert dr[0] == firstDate
assert dr[-1] == end
def test_date_parse_failure(self):
badly_formed_date = "2007/100/1"
msg = "could not convert string to Timestamp"
with pytest.raises(ValueError, match=msg):
Timestamp(badly_formed_date)
with pytest.raises(ValueError, match=msg):
bdate_range(start=badly_formed_date, periods=10)
with pytest.raises(ValueError, match=msg):
bdate_range(end=badly_formed_date, periods=10)
with pytest.raises(ValueError, match=msg):
bdate_range(badly_formed_date, badly_formed_date)
def test_daterange_bug_456(self):
# GH #456
rng1 = bdate_range("12/5/2011", "12/5/2011")
rng2 = bdate_range("12/2/2011", "12/5/2011")
assert rng2._data.freq == BDay()
result = rng1.union(rng2)
assert isinstance(result, DatetimeIndex)
@pytest.mark.parametrize("closed", ["left", "right"])
def test_bdays_and_open_boundaries(self, closed):
# GH 6673
start = "2018-07-21" # Saturday
end = "2018-07-29" # Sunday
result = date_range(start, end, freq="B", closed=closed)
bday_start = "2018-07-23" # Monday
bday_end = "2018-07-27" # Friday
expected = date_range(bday_start, bday_end, freq="D")
tm.assert_index_equal(result, expected)
# Note: we do _not_ expect the freqs to match here
def test_bday_near_overflow(self):
# GH#24252 avoid doing unnecessary addition that _would_ overflow
start = Timestamp.max.floor("D").to_pydatetime()
rng = date_range(start, end=None, periods=1, freq="B")
expected = DatetimeIndex([start], freq="B")
tm.assert_index_equal(rng, expected)
def test_bday_overflow_error(self):
# GH#24252 check that we get OutOfBoundsDatetime and not OverflowError
msg = "Out of bounds nanosecond timestamp"
start =
|
Timestamp.max.floor("D")
|
pandas.Timestamp.max.floor
|
import copy
import random
import numpy as np
import pandas as pd
import pytest
from scipy import sparse
import sklearn.datasets
import sklearn.model_selection
from autosklearn.data.feature_validator import FeatureValidator
# Fixtures to be used in the tests below. By default all elements have 100 datapoints.
@pytest.fixture
def input_data_featuretest(request):
if request.param == 'numpy_categoricalonly_nonan':
return np.random.randint(10, size=(100, 10))
elif request.param == 'numpy_numericalonly_nonan':
return np.random.uniform(10, size=(100, 10))
elif request.param == 'numpy_mixed_nonan':
return np.column_stack([
np.random.uniform(10, size=(100, 3)),
np.random.randint(10, size=(100, 3)),
np.random.uniform(10, size=(100, 3)),
np.random.randint(10, size=(100, 1)),
])
elif request.param == 'numpy_string_nonan':
return np.array([
['a', 'b', 'c', 'a', 'b', 'c'],
['a', 'b', 'd', 'r', 'b', 'c'],
])
elif request.param == 'numpy_categoricalonly_nan':
array = np.random.randint(10, size=(100, 10)).astype('float')
array[50, 0:5] = np.nan
return array
elif request.param == 'numpy_numericalonly_nan':
array = np.random.uniform(10, size=(100, 10)).astype('float')
array[50, 0:5] = np.nan
        # Assigning np.nan can leave the array with object dtype, so cast back to float explicitly.
return array.astype('float')
elif request.param == 'numpy_mixed_nan':
array = np.column_stack([
np.random.uniform(10, size=(100, 3)),
np.random.randint(10, size=(100, 3)),
np.random.uniform(10, size=(100, 3)),
np.random.randint(10, size=(100, 1)),
])
array[50, 0:5] = np.nan
return array
elif request.param == 'numpy_string_nan':
return np.array([
['a', 'b', 'c', 'a', 'b', 'c'],
[np.nan, 'b', 'd', 'r', 'b', 'c'],
])
elif request.param == 'pandas_categoricalonly_nonan':
return pd.DataFrame([
{'A': 1, 'B': 2},
{'A': 3, 'B': 4},
], dtype='category')
elif request.param == 'pandas_numericalonly_nonan':
return pd.DataFrame([
{'A': 1, 'B': 2},
{'A': 3, 'B': 4},
], dtype='float')
elif request.param == 'pandas_mixed_nonan':
frame = pd.DataFrame([
{'A': 1, 'B': 2},
{'A': 3, 'B': 4},
], dtype='category')
frame['B'] = pd.to_numeric(frame['B'])
return frame
elif request.param == 'pandas_categoricalonly_nan':
return pd.DataFrame([
{'A': 1, 'B': 2, 'C': np.nan},
{'A': 3, 'C': np.nan},
], dtype='category')
elif request.param == 'pandas_numericalonly_nan':
return pd.DataFrame([
{'A': 1, 'B': 2, 'C': np.nan},
{'A': 3, 'C': np.nan},
], dtype='float')
elif request.param == 'pandas_mixed_nan':
frame = pd.DataFrame([
{'A': 1, 'B': 2, 'C': 8},
{'A': 3, 'B': 4},
], dtype='category')
frame['B'] = pd.to_numeric(frame['B'])
return frame
elif request.param == 'pandas_string_nonan':
return pd.DataFrame([
{'A': 1, 'B': 2},
{'A': 3, 'B': 4},
], dtype='string')
elif request.param == 'list_categoricalonly_nonan':
return [
['a', 'b', 'c', 'd'],
['e', 'f', 'c', 'd'],
]
elif request.param == 'list_numericalonly_nonan':
return [
[1, 2, 3, 4],
[5, 6, 7, 8]
]
elif request.param == 'list_mixed_nonan':
return [
['a', 2, 3, 4],
['b', 6, 7, 8]
]
elif request.param == 'list_categoricalonly_nan':
return [
['a', 'b', 'c', np.nan],
['e', 'f', 'c', 'd'],
]
elif request.param == 'list_numericalonly_nan':
return [
[1, 2, 3, np.nan],
[5, 6, 7, 8]
]
elif request.param == 'list_mixed_nan':
return [
['a', np.nan, 3, 4],
['b', 6, 7, 8]
]
elif 'sparse' in request.param:
# We expect the names to be of the type sparse_csc_nonan
sparse_, type_, nan_ = request.param.split('_')
if 'nonan' in nan_:
data = np.ones(3)
else:
data = np.array([1, 2, np.nan])
# Then the type of sparse
row_ind = np.array([0, 1, 2])
col_ind = np.array([1, 2, 1])
if 'csc' in type_:
return sparse.csc_matrix((data, (row_ind, col_ind)))
elif 'csr' in type_:
return sparse.csr_matrix((data, (row_ind, col_ind)))
elif 'coo' in type_:
return sparse.coo_matrix((data, (row_ind, col_ind)))
elif 'bsr' in type_:
return sparse.bsr_matrix((data, (row_ind, col_ind)))
elif 'lil' in type_:
return sparse.lil_matrix((data))
elif 'dok' in type_:
return sparse.dok_matrix(np.vstack((data, data, data)))
elif 'dia' in type_:
return sparse.dia_matrix(np.vstack((data, data, data)))
else:
ValueError("Unsupported indirect fixture {}".format(request.param))
elif 'openml' in request.param:
_, openml_id = request.param.split('_')
X, y = sklearn.datasets.fetch_openml(data_id=int(openml_id),
return_X_y=True, as_frame=True)
return X
else:
ValueError("Unsupported indirect fixture {}".format(request.param))
# Actual checks for the features
@pytest.mark.parametrize(
'input_data_featuretest',
(
'numpy_categoricalonly_nonan',
'numpy_numericalonly_nonan',
'numpy_mixed_nonan',
'numpy_categoricalonly_nan',
'numpy_numericalonly_nan',
'numpy_mixed_nan',
'pandas_categoricalonly_nonan',
'pandas_numericalonly_nonan',
'pandas_mixed_nonan',
'pandas_numericalonly_nan',
'list_numericalonly_nonan',
'list_numericalonly_nan',
'sparse_bsr_nonan',
'sparse_bsr_nan',
'sparse_coo_nonan',
'sparse_coo_nan',
'sparse_csc_nonan',
'sparse_csc_nan',
'sparse_csr_nonan',
'sparse_csr_nan',
'sparse_dia_nonan',
'sparse_dia_nan',
'sparse_dok_nonan',
'sparse_dok_nan',
'sparse_lil_nonan',
'sparse_lil_nan',
'openml_40981', # Australian
),
indirect=True
)
def test_featurevalidator_supported_types(input_data_featuretest):
validator = FeatureValidator()
validator.fit(input_data_featuretest, input_data_featuretest)
transformed_X = validator.transform(input_data_featuretest)
if sparse.issparse(input_data_featuretest):
assert sparse.issparse(transformed_X)
else:
assert isinstance(transformed_X, np.ndarray)
assert np.shape(input_data_featuretest) == np.shape(transformed_X)
assert np.issubdtype(transformed_X.dtype, np.number)
assert validator._is_fitted
@pytest.mark.parametrize(
'input_data_featuretest',
(
'list_categoricalonly_nonan',
'list_categoricalonly_nan',
'list_mixed_nonan',
'list_mixed_nan',
),
indirect=True
)
def test_featurevalidator_unsupported_list(input_data_featuretest):
validator = FeatureValidator()
with pytest.raises(ValueError, match=r".*has invalid type object. Cast it to a valid dtype.*"):
validator.fit(input_data_featuretest)
@pytest.mark.parametrize(
'input_data_featuretest',
(
'numpy_string_nonan',
'numpy_string_nan',
),
indirect=True
)
def test_featurevalidator_unsupported_numpy(input_data_featuretest):
validator = FeatureValidator()
with pytest.raises(ValueError, match=r".*When providing a numpy array.*not supported."):
validator.fit(input_data_featuretest)
@pytest.mark.parametrize(
'input_data_featuretest',
(
'pandas_categoricalonly_nan',
'pandas_mixed_nan',
'openml_179', # adult workclass has NaN in columns
),
indirect=True
)
def test_featurevalidator_unsupported_pandas(input_data_featuretest):
validator = FeatureValidator()
with pytest.raises(ValueError, match=r"Categorical features in a dataframe.*missing/NaN"):
validator.fit(input_data_featuretest)
@pytest.mark.parametrize(
'input_data_featuretest',
(
'numpy_categoricalonly_nonan',
'numpy_mixed_nonan',
'numpy_categoricalonly_nan',
'numpy_mixed_nan',
'pandas_categoricalonly_nonan',
'pandas_mixed_nonan',
'list_numericalonly_nonan',
'list_numericalonly_nan',
'sparse_bsr_nonan',
'sparse_bsr_nan',
'sparse_coo_nonan',
'sparse_coo_nan',
'sparse_csc_nonan',
'sparse_csc_nan',
'sparse_csr_nonan',
'sparse_csr_nan',
'sparse_dia_nonan',
'sparse_dia_nan',
'sparse_dok_nonan',
'sparse_dok_nan',
'sparse_lil_nonan',
),
indirect=True
)
def test_featurevalidator_fitontypeA_transformtypeB(input_data_featuretest):
"""
Check that we can fit on one data type (e.g. numpy) and still transform
when the user later provides a different type (e.g. pandas).
This is only problematic when an encoder is created.
"""
validator = FeatureValidator()
validator.fit(input_data_featuretest, input_data_featuretest)
if isinstance(input_data_featuretest, pd.DataFrame):
complementary_type = input_data_featuretest.to_numpy()
elif isinstance(input_data_featuretest, np.ndarray):
complementary_type = pd.DataFrame(input_data_featuretest)
elif isinstance(input_data_featuretest, list):
complementary_type = pd.DataFrame(input_data_featuretest)
elif sparse.issparse(input_data_featuretest):
complementary_type = sparse.csr_matrix(input_data_featuretest.todense())
else:
raise ValueError(type(input_data_featuretest))
transformed_X = validator.transform(complementary_type)
assert np.shape(input_data_featuretest) == np.shape(transformed_X)
assert np.issubdtype(transformed_X.dtype, np.number)
assert validator._is_fitted
def test_featurevalidator_get_columns_to_encode():
"""
Makes sure that encoded columns are returned by _get_columns_to_encode
whereas numerical columns are not returned
"""
validator = FeatureValidator()
df = pd.DataFrame([
{'int': 1, 'float': 1.0, 'category': 'one', 'bool': True},
{'int': 2, 'float': 2.0, 'category': 'two', 'bool': False},
])
for col in df.columns:
df[col] = df[col].astype(col)
enc_columns, feature_types = validator._get_columns_to_encode(df)
assert enc_columns == ['category', 'bool']
assert feature_types == ['numerical', 'numerical', 'categorical', 'categorical']
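# A hedged illustration (plain pandas, not an auto-sklearn API) of the idea
# behind _get_columns_to_encode: categorical and boolean columns need an
# encoder, purely numerical columns do not. The helper below is hypothetical.
def _example_columns_to_encode():
    df = pd.DataFrame({
        'int': pd.Series([1, 2], dtype='int'),
        'float': pd.Series([1.0, 2.0], dtype='float'),
        'category': pd.Series(['one', 'two'], dtype='category'),
        'bool': pd.Series([True, False], dtype='bool'),
    })
    to_encode = df.select_dtypes(include=['category', 'bool']).columns.tolist()
    assert to_encode == ['category', 'bool']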
def test_features_unsupported_calls_are_raised():
"""
Makes sure we raise a proper message to the user,
when providing unsupported data input or using the validator in a way that is not
expected
"""
validator = FeatureValidator()
with pytest.raises(ValueError, match=r"Auto-sklearn does not support time"):
validator.fit(
pd.DataFrame({'datetime': [pd.Timestamp('20180310')]})
)
with pytest.raises(ValueError, match="has invalid type object"):
validator.fit(
pd.DataFrame({'string': ['foo']})
)
with pytest.raises(ValueError, match=r"Auto-sklearn only supports.*yet, the provided input"):
validator.fit({'input1': 1, 'input2': 2})
with pytest.raises(ValueError, match=r"has unsupported dtype string"):
validator.fit(
|
pd.DataFrame([{'A': 1, 'B': 2}], dtype='string')
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Retrieve bikeshare stations metadata."""
# pylint: disable=invalid-name
from io import BytesIO
from typing import Dict, List
from urllib.request import urlopen
from zipfile import ZipFile
import geopandas as gpd
import pandas as pd
import pandera as pa
import requests
ch_essentials_schema = pa.DataFrameSchema(
columns={
"ID": pa.Column(pa.Int),
"NAME": pa.Column(pd.StringDtype()),
"POI_LATITUDE": pa.Column(
pa.Float64,
nullable=True,
),
"POI_LONGITUDE": pa.Column(
pa.Float64,
nullable=True,
),
},
index=pa.Index(pa.Int),
)
poi_schema = pa.DataFrameSchema(
columns={
"ID": pa.Column(pa.Int, unique=True),
"ADDRESS_INFO": pa.Column(pd.StringDtype()),
"NAME": pa.Column(pd.StringDtype(), unique=True),
"CATEGORY": pa.Column(pd.StringDtype()),
"PHONE": pa.Column(pd.StringDtype()),
"EMAIL": pa.Column(pd.StringDtype()),
"WEBSITE": pa.Column(pd.StringDtype()),
"GEOID": pa.Column(pa.Float, nullable=True),
"RECEIVED_DATE": pa.Column(pd.StringDtype()),
"ADDRESS_POINT_ID": pa.Column(pa.Float, nullable=True),
"LINEAR_NAME_FULL": pa.Column(pd.StringDtype()),
"ADDRESS_FULL": pa.Column(pd.StringDtype()),
"POSTAL_CODE": pa.Column(pd.StringDtype()),
"MUNICIPALITY": pa.Column(pd.StringDtype()),
"CITY": pa.Column(pd.StringDtype()),
"PLACE_NAME": pa.Column(pd.StringDtype()),
"GENERAL_USE_CODE": pa.Column(pa.Float, nullable=True),
"CENTRELINE": pa.Column(pa.Float, nullable=True),
"LO_NUM": pa.Column(pa.Float, nullable=True),
"LO_NUM_SUF": pa.Column(pd.StringDtype()),
"HI_NUM": pa.Column(pd.StringDtype()),
"HI_NUM_SUF": pa.Column(pd.StringDtype()),
"LINEAR_NAME_ID": pa.Column(pa.Float, nullable=True),
"WARD": pa.Column(pd.StringDtype()),
"WARD_2003": pa.Column(pa.Float, nullable=True),
"WARD_2018": pa.Column(pa.Float, nullable=True),
"MI_PRINX": pa.Column(pa.Float, nullable=True),
"ATTRACTION": pa.Column(pd.StringDtype(), unique=True),
"MAP_ACCESS": pa.Column(pd.StringDtype()),
"POI_LONGITUDE": pa.Column(pa.Float, unique=False),
"POI_LATITUDE": pa.Column(pa.Float, unique=False),
},
index=pa.Index(pa.Int),
)
gdf_schema = pa.DataFrameSchema(
columns={
"AREA_ID": pa.Column(pa.Int),
"AREA_SHORT_CODE": pa.Column(pd.StringDtype()),
"AREA_LONG_CODE": pa.Column(pd.StringDtype()),
"AREA_NAME": pa.Column(pd.StringDtype()),
"Shape__Area": pa.Column(pa.Float64),
# "Shape__Length": pa.Column(pa.Float64),
# "LATITUDE": pa.Column(pd.StringDtype(), nullable=True),
"AREA_LATITUDE": pa.Column(pa.Float64),
# "LONGITUDE": pa.Column(pd.StringDtype(), nullable=True),
"AREA_LONGITUDE": pa.Column(pa.Float64),
},
index=pa.Index(pa.Int),
)
pub_trans_locations_schema = pa.DataFrameSchema(
columns={
"stop_id": pa.Column(pa.Int),
"stop_code": pa.Column(pa.Int),
"stop_name": pa.Column(pd.StringDtype()),
"stop_desc": pa.Column(pd.StringDtype(), nullable=True),
"lat": pa.Column(pa.Float64),
"lon": pa.Column(pa.Float64),
"zone_id": pa.Column(pa.Float64, nullable=True),
"stop_url": pa.Column(pd.StringDtype(), nullable=True),
"location_type": pa.Column(pa.Float64, nullable=True),
"parent_station": pa.Column(pa.Float64, nullable=True),
"stop_timezone": pa.Column(pa.Float64, nullable=True),
"wheelchair_boarding": pa.Column(pa.Int),
},
index=pa.Index(pa.Int),
)
coll_univ_schema = pa.DataFrameSchema(
columns={
"institution_id": pa.Column(pa.Int),
"institution_name": pa.Column(pd.StringDtype()),
"lat": pa.Column(pa.Float64),
"lon": pa.Column(pa.Float64),
},
index=pa.Index(pa.Int),
)
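# A minimal sketch of how these schemas are intended to be used: pandera's
# DataFrameSchema.validate() returns the validated frame or raises a
# SchemaError. The one-row frame below is illustrative, not real data.
def _coll_univ_schema_example() -> pd.DataFrame:
    df = pd.DataFrame(
        {
            "institution_id": [1],
            "institution_name": pd.Series(["Example College"], dtype=pd.StringDtype()),
            "lat": [43.66],
            "lon": [-79.40],
        }
    )
    return coll_univ_schema.validate(df)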
def get_lat_long(row):
"""Get latitude and longitude."""
return row["coordinates"]
@pa.check_output(poi_schema)
def get_poi_data(url: str, poi_params: Dict) -> pd.DataFrame:
"""Get points of interest within city boundaries."""
poi_dtypes_dict = dict(
ADDRESS_INFO=pd.StringDtype(),
NAME=pd.StringDtype(),
CATEGORY=pd.StringDtype(),
PHONE=pd.StringDtype(),
EMAIL=pd.StringDtype(),
WEBSITE=pd.StringDtype(),
RECEIVED_DATE=pd.StringDtype(),
LINEAR_NAME_FULL=pd.StringDtype(),
ADDRESS_FULL=pd.StringDtype(),
POSTAL_CODE=pd.StringDtype(),
MUNICIPALITY=pd.StringDtype(),
CITY=pd.StringDtype(),
PLACE_NAME=pd.StringDtype(),
LO_NUM_SUF=pd.StringDtype(),
HI_NUM=pd.StringDtype(),
HI_NUM_SUF=pd.StringDtype(),
WARD=pd.StringDtype(),
ATTRACTION=pd.StringDtype(),
MAP_ACCESS=pd.StringDtype(),
)
package = requests.get(url, params=poi_params).json()
poi_url = package["result"]["resources"][0]["url"]
df =
|
pd.read_csv(poi_url)
|
pandas.read_csv
|
"""Convenience methods for data visualization
- matplotlib, seaborn, statsmodels, pandas
Author: <NAME>
License: MIT
"""
import numpy as np
import scipy
import pandas as pd
from pandas import DataFrame, Series
import wget
import os
import re
import time
import requests
import calendar
from datetime import datetime
from pandas.api.types import is_list_like, is_datetime64_any_dtype
from pandas.api.types import is_integer_dtype, is_string_dtype, is_numeric_dtype
from pandas.api import types
from numpy.ma import masked_invalid as valid
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import dates as mdates
from matplotlib import colors, cm
from matplotlib.lines import Line2D
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters() # for date formatting in plots
try:
from settings import ECHO
except:
ECHO = False
# plt.style.use('ggplot')
def row_formatted(df, formats={}, width=None):
"""Apply display formats by row index, and set row index width
Examples
--------
row_formatted(prices, formats={'vwap': '{:.0f}', 'mid': '{:.3f}'})
"""
out = df.apply(lambda x: x.map(formats.get(x.name,'{}').format), axis=1)
if width:
out.index = out.index.str.slice(0, width)
return out
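# A small usage sketch for row_formatted with a toy frame (one row per metric;
# the format strings are keyed by the row index, not by column):
def _row_formatted_example():
    prices = DataFrame({'AAA': [101.2345, 0.51234], 'BBB': [99.8765, 0.49876]},
                       index=['vwap', 'mid'])
    return row_formatted(prices, formats={'vwap': '{:.0f}', 'mid': '{:.3f}'})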
def plot_bands(mean, stderr, width=1.96, x=None, ylabel=None, xlabel=None,
c="b", loc='best', legend=None, ax=None, fontsize=10,
title=None, hline=None, vline=None):
"""Line plot a series with confidence bands"""
ax = ax or plt.gca()
if x is None:
x = np.arange(len(mean)) # x-axis is event day number
if hline is not None:
if not is_list_like(hline):
hline = [hline]
for line in hline:
ax.axhline(line, linestyle=':', color='g')
if vline is not None:
if not is_list_like(vline):
vline = [vline]
for line in vline:
ax.axvline(line, linestyle=':', color='g')
ax.plot(x, mean, ls='-', c=c)
ax.fill_between(x, mean-(width*np.array(stderr)),
mean+(width*np.array(stderr)), alpha=0.3, color=c)
if legend:
ax.legend(legend, loc=loc, fontsize=fontsize)
ax.set_title(title, fontsize=fontsize+4)
ax.set_ylabel(ylabel, fontsize=fontsize+2)
ax.set_xlabel(xlabel, fontsize=fontsize+2)
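# A minimal sketch of plot_bands on synthetic event-time data (the numbers are
# purely illustrative):
def _plot_bands_example():
    mean = np.cumsum(np.random.randn(21)) / 10
    stderr = np.full(21, 0.05)
    plot_bands(mean, stderr, x=np.arange(-10, 11), ylabel='CAR',
               xlabel='event day', hline=0, title='Cumulative abnormal return')
    plt.show()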
def plot_scatter(x, y, labels=None, ax=None, xlabel=None, ylabel=None,
c=None, cmap=None, alpha=0.75, edgecolor=None, s=10,
marker=None, title='', abline=True, fontsize=12):
"""Scatter plot, optionally with abline slope and point labels
Parameters
----------
x : Series or array-like
to plot on horizontal axis
y : Series or array-like
to plot on vertical axis
labels : Series or array-like of str, default is None
annotate plotted points with text
ax : matplotlib axes object, optional
from plt.subplots() or plt.gca(), default is None
xlabel : str, optional
horizontal axis label, default is x.name else None
ylabel : str, optional
vertical axis label, default is y.name else None
title : str, optional
title of plot, default is ''
abline : bool or None, default True
plot the fitted slope if True, a 45-degree line if False; if None, plot no line
s : numeric, default 10
marker area size
"""
if ax is None:
ax = plt.gca()
ax.cla()
ax.clear()
if c is not None and cmap is not None:
cmin = min(c)
cmax = max(c)
norm = colors.Normalize(cmin - (cmax-cmin)/2, cmax)
c = cm.ScalarMappable(norm=norm, cmap=cmap).to_rgba(c)
cmap = None
cax = ax.scatter(x, y, marker=marker, s=s, c=c, alpha=alpha,
edgecolor=edgecolor, cmap=cmap)
#cmap=plt.cm.get_cmap('tab10', 3)
if abline is not None:
xmin, xmax, ymin, ymax = ax.axis()
if abline: # plot fitted slope
f = ~(np.isnan(x) | np.isnan(y))
slope, intercept = np.polyfit(list(x[f]), list(y[f]), 1)
y_pred = [slope * i + intercept for i in list(x[f])]
ax.plot(x[f], y_pred, 'g-')
else: # plot 45-degree line
bottom_left, top_right = min(xmin, ymin), max(xmax, ymax)
ax.plot([bottom_left, top_right], [bottom_left, top_right], 'g-')
xlabel = xlabel or (x.name if hasattr(x, 'name') else None)
ylabel = ylabel or (y.name if hasattr(y, 'name') else None)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=fontsize)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=fontsize)
if labels is not None:
for t, xt, yt in zip(labels, x, y):
plt.text(xt * 1.01, yt * 1.01, t, fontsize=fontsize)
ax.set_title(title, fontsize=fontsize+4)
mfc = cax.get_fc()[0]
return Line2D([0], [0], marker=marker, mfc=mfc, ms=10, ls='', c=mfc)
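# A short sketch of plot_scatter with labelled points and a fitted slope
# (random data, for illustration only):
def _plot_scatter_example():
    x = Series(np.random.randn(20), name='beta')
    y = Series(0.5 * x.values + 0.1 * np.random.randn(20), name='alpha')
    plot_scatter(x, y, labels=[str(i) for i in range(20)], abline=True,
                 title='alpha vs beta')
    plt.show()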
def plot_hist(*args, kde=True, hist=False, bins=None, pdf=scipy.stats.norm.pdf,
ax=None, title='', xlabel='', ylabel='density', fontsize=12):
"""Histogram bar plot with target density"""
ax = ax or plt.gca()
for arg in args:
frame = DataFrame(arg)
for col in frame.columns:
y = frame[col].notnull().values
sns.distplot(frame[col][y], kde=kde, hist=hist,
bins=bins, label=col, ax=ax)
if pdf:
if not types.is_list_like(pdf):
pdf = [pdf]
if isinstance(pdf, dict):
labels = list(pdf.keys())
pdf = list(pdf.values())
else:
labels = None
pdf = list(pdf)
bx = ax.twinx() if args else ax
bx.yaxis.set_tick_params(rotation=0, labelsize=fontsize)
x= np.linspace(*plt.xlim(), 100)
for i, p in enumerate(pdf):
bx.plot(x, p(x), label=labels[i] if labels else None,
color=f"C{len(args)+i}")
if labels:
bx.legend(labels, loc='center right')
ax.legend(loc='center left')
ax.xaxis.set_tick_params(rotation=0, labelsize=fontsize)
ax.yaxis.set_tick_params(rotation=0, labelsize=fontsize)
ax.set_title(title, fontsize=fontsize+4)
ax.set_ylabel(ylabel, fontsize=fontsize+4)
ax.set_xlabel(xlabel, fontsize=fontsize+4)
def plot_bar(y, ax=None, labels=None, xlabel=None, ylabel=None, fontsize=12,
title='', legend=None, loc='best', labelsize=8, rotation=0):
"""Bar plot with annotated points"""
ax = ax or plt.gca()
bars = list(np.ravel(y.plot.bar(ax=ax, width=0.8).containers, order='F'))
ax.set_title(title, fontsize=fontsize+4)
ax.xaxis.set_tick_params(rotation=0, labelsize=fontsize)
ax.yaxis.set_tick_params(rotation=0, labelsize=fontsize)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=fontsize+2)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=fontsize+2)
if legend is not None:
ax.legend(legend, loc)
elif loc is not None:
ax.legend(loc=loc)
if labels is not None:
for pt, freq in zip(bars, np.ravel(labels)):
ax.annotate(str(freq), fontsize=labelsize,
xy=(pt.get_x() + pt.get_width() / 2, pt.get_height()),
xytext=(0, 3), textcoords="offset points",
ha='center', va='bottom', rotation=rotation)
def plot_date(y1, y2=None, ax=None, xmin=0, xmax=99999999, fontsize=12,
label1=None, label2=None, legend1=None, legend2=None, cn=0,
loc1='upper left', loc2='upper right', ls1='-', ls2='-',
hlines=[], vlines=[], vspans=[], marker=None,
rescale=False, yscale=False, title='', points=None, **kwargs):
"""Line plot with int date on x-axis, and primary and secondary y-dataframes
Parameters
----------
y1 : DataFrame
to plot on primary y-axis
y2 : DataFrame, optional
to plot on secondary y-axis (default is None)
ax : matplotlib axes object, optional
from plt.subplots() or plt.gca(), default is None
cn : int, default is 0
to cycle through CN colors starting at N=cn
xmin : int, optional
minimum of x-axis date range (default is auto)
xmax : int, optional
maximum of x-axis date range (default is auto)
hlines : list of int (default = [])
y-axis points where to place horizontal lines
vlines : list of int (default = [])
x-axis points where to place vertical lines
vspans : list of int tuples (default = [])
vertical regions to highlight
"""
ax = ax or plt.gca()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y%m%d'))
if y1 is not None:
y1 = DataFrame(y1)
y1 = y1.loc[(y1.index >= xmin) & (y1.index <= xmax)]
base = y1.loc[max(y1.notna().idxmax()),:] if rescale else 1
#sns.lineplot(x = pd.to_datetime(y1.index[f], format='%Y%m%d'),
#y = y1.loc[f], ax=ax)
for ci, c in enumerate(y1.columns):
f = y1.loc[:,c].notnull().values
ax.plot(pd.to_datetime(y1.index[f], format='%Y%m%d'),
y1.loc[f,c] / (base[c] if rescale else 1),
marker=marker, linestyle=ls1, color=f'C{ci+cn}')
if points is not None:
ax.scatter(pd.to_datetime(points.index, format='%Y%m%d'), points,
marker='o', color='r')
if len(y1.columns) > 1 or legend1:
ax.set_ylabel('')
ax.legend(legend1 or y1.columns, fontsize=fontsize, loc=loc1)
if label1:
ax.set_ylabel(label1, fontsize=fontsize+2)
if y2 is not None:
y2 = DataFrame(y2)
y2 = y2.loc[(y2.index >= xmin) & (y2.index <= xmax)]
base = y2.loc[max(y2.notna().idxmax()),:] if rescale else 1
bx = ax.twinx()
for cj, c in enumerate(y2.columns):
g = y2.loc[:,c].notnull().values
bx.plot(pd.to_datetime(y2.index[g], format='%Y%m%d'),
y2.loc[g, c] / (base[c] if rescale else 1),
marker=marker, linestyle=ls2, color=f"C{ci+cj+cn+1}")
if yscale:
amin, amax = ax.get_ylim()
bmin, bmax = bx.get_ylim()
ax.set_ylim(min(amin, bmin), max(amax, bmax))
if len(y2.columns) > 1 or legend2:
bx.set_ylabel('')
bx.legend(legend2 or y2.columns, fontsize=fontsize, loc=loc2)
if label2:
bx.set_ylabel(label2, fontsize=fontsize+2)
for hline in hlines:
plt.axhline(hline, linestyle='-.', color='y')
for vline in vlines:
plt.axvline(pd.to_datetime(vline, format='%Y%m%d'), ls='-.', color='y')
for vspan in vspans:
plt.axvspan(*([pd.to_datetime(v, format='%Y%m%d') for v in vspan]),
alpha=0.5, color='grey')
ax.xaxis.set_tick_params(rotation=0, labelsize=fontsize)
ax.yaxis.set_tick_params(rotation=0, labelsize=fontsize)
plt.title(title, fontsize=fontsize+4)
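# A compact sketch of plot_date: the index holds integer yyyymmdd dates, the
# first frame goes on the primary axis and the second on the secondary axis
# (synthetic prices and volumes, for illustration):
def _plot_date_example():
    idx = [20230102, 20230103, 20230104, 20230105, 20230106]
    prices = DataFrame({'price': [100, 101, 99, 102, 103]}, index=idx)
    volumes = DataFrame({'volume': [5, 7, 4, 9, 6]}, index=idx)
    plot_date(prices, volumes, label1='price', label2='volume',
              title='price and volume')
    plt.show()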
open_t = pd.to_datetime('1900-01-01T09:30')
close_t = pd.to_datetime('1900-01-01T16:00')
def plot_time(y1, y2=None, ax= None, xmin=open_t, xmax=close_t, marker=None,
title='', loc1=None, loc2=None, legend1=None, legend2=None,
fontsize=12, **kwargs):
"""Plot lines with time on x-axis, and primary and secondary y-axis
Parameters
----------
y1 : DataFrame
to plot on left axis
y2: DataFrame or None
to plot on right axis
ax : axis
matplotlib axes object to plot in
xmin : datetime or None, default is '1900-01-01T09:30'
left-most x-axis time, None to include all
xmax : datetime, or None, default is '1900-01-01T16:00'
right-most x-axis time, None to include all
marker : str, default is None
marker style to plot
title : str, default is ''
text to display as title
loc1, loc2 : str, default is None
locations to place legend/s
legend1, legend2 : list of str, default is None
labels to display in legend
"""
ax = ax or plt.gca()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
cn = 0 # to cycle through matplotlib 'CN' color palette
left =
|
DataFrame(y1)
|
pandas.DataFrame
|
# ------------------- Graphical User Interface for Network Analysis ------------------- #
# Libraries
import warnings
warnings.filterwarnings("ignore")
from graph_tool.all import *
import graph_tool.all as gt
import ipywidgets as widgets
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import clear_output
import seaborn as sns
from ipywidgets import *
import rpy2.robjects.packages as rpackages
from rpy2.robjects.packages import importr
import rpy2
import rpy2.robjects as robjects
from rpy2.robjects.vectors import StrVector
import pandas as pd
from IPython.utils import io
import random
import numpy as np
# Libraries for Download button
import base64
import hashlib
from typing import Callable
import ipywidgets
from IPython.display import HTML, display
# Installing R packages
utils = rpackages.importr('utils')
with io.capture_output() as captured:
utils.install_packages('poweRlaw', repos="https://cloud.r-project.org")
x = rpackages.importr('poweRlaw')
# Creating a My_Network class to hold functions for all network analysis methods
class My_Network:
def __init__(self, file_name):
# Network class is initialized through the file upload
if ".csv" in file_name:
self.G = graph_tool.load_graph_from_csv(file_name)
if ".graphml" in file_name:
self.G = graph_tool.load_graph(file_name)
def prepare_the_network(self):
"""
Network preparation includes:
1) Making it undirected
2) Removal of parallel edges if they are present
3) Extraction of the largest connected component that is treated as the final ready-to-use network (all other components are removed).
"""
self.G.set_directed(False) # 1)
graph_tool.stats.remove_parallel_edges(self.G) # 2)
# 3)
comp, hist = graph_tool.topology.label_components(self.G)
label = gt.label_largest_component(self.G)
to_remove = []
for v in self.G.vertices():
if label[v]==0:
to_remove.append(v)
for v in reversed(sorted(to_remove)):
self.G.remove_vertex(v)
"""
The following functions are responsible for calculation of centrality measures and clustering coefficient.
It is done by generating a corresponding map of the form: node <---> value of the measure.
"""
def create_degree_distribution_map(self):
my_map = self.G.degree_property_map("total")
return my_map
def create_betweenness_distribution_map(self):
v_betweeness_map, e_betweenness_map = graph_tool.centrality.betweenness(self.G)
my_map = v_betweeness_map
return my_map
def create_closeness_distribution_map(self):
my_map = graph_tool.centrality.closeness(self.G)
return my_map
def create_eigenvector_distribution_map(self):
eigen_value, v_eigen_map = graph_tool.centrality.eigenvector(self.G)
my_map = v_eigen_map
return my_map
def create_clustering_map(self):
my_map = graph_tool.clustering.local_clustering(self.G)
return my_map
def create_random_map(self):
# Corresponds to the generation of a random ranking of the nodes. Each node is assigned a random place in the ranking.
# Its position is saved within the vertex property map as it is done for other metrics.
r = self.G.new_vertex_property("double")
indexes = np.arange(self.G.num_vertices())
np.random.shuffle(indexes)
r.a = indexes
return r
def plot_map_histogram(self, my_map, measure_name, block = True):
"""
plot_map_histogram function contains a code for the plot generation
using matplotlib library given the graph-tool map for the measure of interest.
"""
# General settings:
plt.style.use('seaborn-whitegrid')
fig, ax = plt.subplots(constrained_layout=True, figsize=(5, 5))
FONT = 15
# Preparing the data:
my_map = my_map.fa # Extraction of the map's values - a flat array of the measure's values is obtained.
# Calculating basic statistics:
to_calculate_statistics = list(my_map)
avg = round(np.mean(to_calculate_statistics),4)
std = round(np.std(to_calculate_statistics),2)
# Creating the histogram:
n=15
a = ax.hist(my_map, bins=n, facecolor="lightblue",weights=np.zeros_like(my_map) + 1. / len(my_map))
bins_mean = [0.5 * (a[1][j] + a[1][j+1]) for j in range(n)]
sticks_to_mark = ([], [])
for k in range(len(a[0])):
if a[0][k] == 0:
pass
else:
sticks_to_mark[0].append(bins_mean[k])
sticks_to_mark[1].append(a[0][k])
ax.plot(sticks_to_mark[0], sticks_to_mark[1], "b+")
ax.set_xlabel("Value", fontsize = FONT)
ax.set_ylabel("Fraction of nodes", fontsize = FONT)
ax.set_title(measure_name +" histogram \n Mean value: " + str(avg)+ ", Std: "+ str(std), fontsize = FONT)
plt.show(block=block)
return fig, ax
def hubs_impact_check(self):
"""
hubs_impact_check function is used for the evaluation of hubs and low-degree nodes' contribution to the number of links present in the graph.
This is done by extracting all the possible values of the degree (1) and then looping over them (2). Within the loop for each degree number
all nodes with the degree below or equal to it are extracted to form the subnetwork (3). The number of links and nodes in the subnetwork
is divided by the corresponding total numbers in the network (4) to evaluate the contribution of the following degree groups.
"""
largest_N = self.G.num_vertices()
largest_E = self.G.num_edges()
degrees = self.G.get_total_degrees(self.G.get_vertices())
Ns = []
Es = []
degrees_set = list(set(degrees)) # 1)
degrees_set.sort()
degrees_map = self.G.degree_property_map("total")
for degree in degrees_set: # 2)
cut = degree
u = gt.GraphView(self.G, vfilt = lambda v: degrees_map[v]<=cut) # 3)
current_N = u.num_vertices()/largest_N
current_E = u.num_edges()/largest_E # 4)
Ns.append(current_N)
Es.append(current_E)
return Ns, Es, degrees_set
def plot_hubs_impact1(self, degrees_set, Es, block = True): #to use it first need to execute hubs_impact_check
"""
Plot_hubs_impact1 requires data that is generated by hubs_impact_check function.
It generates the plot that represents how the following degree groups contribute to the number of links present in the whole network.
"""
# Plot settings:
FONT = 15
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(5,5))
plt.xticks(fontsize=FONT-3)
plt.yticks(fontsize=FONT-3)
plt.xlabel("K", fontsize= FONT)
plt.ylabel("$L_K/L$", fontsize= FONT)
plt.title("Relation between K and subnetworks' links\n sizes; $s_1$", fontsize= FONT)
# Plotting the data
plt.plot(degrees_set, Es, "o", markersize=4, color="royalblue")
plt.show(block = block)
def plot_hubs_impact2(self, degrees_set, Es, Ns, block = True):
"""
Plot_hubs_impact2 requires data that is generated by hubs_impact_check function.
It generates the plot that represents how the following percentages of the total number of nodes contribute to
the total number of links present in the whole network.
"""
# Plot settings:
FONT=15
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(5,5))
sns.set_context("paper", rc={"font.size":FONT,"axes.titlesize":FONT,"axes.labelsize":FONT, "xtick.labelsize":FONT-3, "ytick.labelsize":FONT-3,
"legend.fontsize":FONT-3, "legend.titlesize":FONT-3})
# Plotting the data
fig = sns.scatterplot(x= Ns, y=Es, hue=np.log(degrees_set), palette="dark:blue_r")
fig.set(xlabel='$N_K/N$', ylabel='$L_K/L$', title="Relation between subnetworks' nodes\nand links sizes; $s_2$")
plt.legend(title="Log(K)", loc ="upper left", title_fontsize=FONT-3)
plt.show(block = block)
def calculate_assortativity_value(self):
# Calculation of the degree correlation coefficient:
return gt.assortativity(self.G, "total")[0]
def plot_ANND(self, normed = False, errorbar = True, block = True):
"""
plot_ANND generates Average Nearest Neighbour Degree plot that represents the mixing patterns between different groups of the nodes.
Each group consists of the nodes of the same degree.
"""
# Plot settings:
FONT = 15
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(5,5))
plt.xlabel("Source degree (k)", fontsize = FONT)
plt.ylabel("$<k_{nn}(k)>$", fontsize = FONT)
title = "Average degree of\n the nearest neighbours" if normed == False else "Normed average degree of\n the nearest neighbours"
plt.title(title, fontsize = FONT)
# Calculating correlation vectors for ANND plot
h = gt.avg_neighbor_corr(self.G, "total", "total")
x = h[2][:-1]
y = h[0]
error = h[1]# yerr argument
# Taking into account "normed" parameter:
if normed == True:
N = self.G.num_vertices()
x = [i/N for i in x]
y = [i/N for i in y]
error = [i/N for i in error]
# Taking into account "errobar" parameter and plotting
if errorbar == True:
plt.errorbar(x, y, error, fmt="o", color="royalblue", markersize=4)
else:
plt.plot(x, y, "o", color="royalblue", markersize=4)
plt.show(block=block)
def one_node_cascade(self, fraction_to_fail, initial_node):
"""
one_node_cascade executes failure cascade simulation with the starting failure point equal to the provided initial node (1).
Failure cascade algorithm requires going constantly through the network and checking the nodes's statuses (2).
The current state of the node is changed to FAILED if the fraction of node's neighbours that have FAILED statuses exceeds
or is equal to fraction_to_fail number (3). Looping over the network finishes when no new FAILED status has been introduced
during the iteration (4). The output of the function is the number of nodes with the FAILED status at the end of the simulation (5).
"""
# Initializing a vector that represents statuses:
gprop = self.G.new_vertex_property("bool")
gprop[initial_node] = True #1)
go_on=True
while go_on == True: #2)
go_on=False #4 assume no new FAILED status in the upcoming iteration
for v in self.G.get_vertices(): #2)
if gprop[v] == 0: # check current node status
failures = gprop.a[self.G.get_all_neighbors(v)] # extract statuses of all the node's neighbours
if sum(failures)/len(failures) >= fraction_to_fail:
gprop[v]=1 #3
go_on=True # have had new FAILED status, must continue looping
cascade_size = sum(gprop.a)/len(gprop.a) #5)
return (initial_node, cascade_size)
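    # Usage sketch (assuming `net` is a prepared My_Network instance):
    #   node, size = net.one_node_cascade(fraction_to_fail=0.25,
    #                                     initial_node=net.G.vertex(0))
    #   `size` is the fraction of nodes that ended up FAILED.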
def cascade_all_nodes(self, fraction_to_fail = 0.25):
"""
cascade_all_nodes runs failure cascade simulation (one_node_cascade) for each of the network's nodes to evaluate distribution
of the final cascade sizes. It returns a dictionary in which each node is assigned a value of the cascade size that it generated.
"""
nodes_numbers = []
cascade_sizes =[]
for v in self.G.get_vertices(): # Take each node
i, c = self.one_node_cascade(fraction_to_fail, v) # Run for it failure cascade
nodes_numbers.append(v)
cascade_sizes.append(c)
zip_iterator = zip(nodes_numbers, cascade_sizes) # Get pairs of elements.
dictionary_names_cascade = dict(zip_iterator) # Return dictionary node_number:cascade_size
return dictionary_names_cascade
def plot_cascade(self, dictionary_names_cascade, fraction_to_fail):
"""
plot_cascade generates a histogram for the results of the cascade_all_nodes function.
It shows the distribution of the failure cascade sizes in the network.
"""
# Plot settings:
FONT = 15
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(5,5))
plt.title("Cascade size histogram C="+ str(fraction_to_fail), fontsize= FONT)
plt.xlabel("Value", fontsize= FONT)
plt.ylabel("Fraction of nodes", fontsize= FONT)
# Data transformation for the histogram:
cascade_sizes = list(dictionary_names_cascade.values())
unique, counts = np.unique(cascade_sizes, return_counts=True)
cascade_sizes_counts = dict(zip(unique, counts))
possible_cascade_sizes, counts = zip(*cascade_sizes_counts.items())
fractions = [i/sum(counts) for i in counts]
# Plotting:
plt.plot(possible_cascade_sizes, fractions,"*", color="royalblue",markersize=4)
plt.show(block=True)
def robustness_evaluation(self, map_G, step = 1):
"""
robustness_evaluation performs the robustness measurements according to the provided map_G.
Robustness measurements are performed by sorting the nodes according to the map_G values (1).
Then subsequent fractions of the nodes are taken according to the sorted pattern (2) and removed from the network
using the filtering option in graph-tool (3). In this way new subgraphs are generated that contain only the nodes that were not filtered out (removed) and the edges between them (4).
The largest component sizes of these subnetworks are calculated and returned.
"""
largest_N = self.G.num_vertices()
largest_E = self.G.num_edges()
giant_component_size = []
vertices_to_remove = map_G.a.argsort()[::-1] # 1)
f_previous = 0
# settings for a vector that represents whether a node should be taken or not when performing network filtering
gprop = self.G.new_vertex_property("bool")
self.G.vertex_properties["no_removal"] = gprop
for v in self.G.vertices():
self.G.properties[("v","no_removal")][v] = True
for fraction in range(0,100,step):
f = fraction/100
new_to_remove = vertices_to_remove[int(f_previous*largest_N):int(f*largest_N)] # 2) adding new nodes to be filtered
""" In order to reduce computational costs the filtering statuses are added subsequently. In other words in the first iteration
x nodes, equal to f_previous*largest_N, should be filtered (removed), so x nodes have no_removal = False. In new iteration x+y (int(f*largest_N))
nodes should be added the filtered status. However, already x nodes have no_removal = False, therefore only nodes from the range
int(f_previous*largest_N):int(f*largest_N) must change no_removal = False.
"""
for node in new_to_remove:
self.G.properties[("v","no_removal")][node] = False # 3)
f_previous = f
sub = GraphView(self.G, gprop) # 4)
comp, hist = graph_tool.topology.label_components(sub) #5)
giant_component_size.append(max(hist))
return giant_component_size #5)
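    # Usage sketch (assuming `net` is a prepared My_Network instance): remove
    # nodes by decreasing degree in 1% steps and track the giant component:
    #   sizes = net.robustness_evaluation(net.create_degree_distribution_map(), step=1)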
def robustness_random_evaluation(self, N=10):
"""
Performs robustness assessment in terms of random failures. It generates N random maps, each corresponding to a random
order of the nodes. According to each map the removal is performed and the corresponding largest component sizes
are measured; the results are averaged over the N runs.
"""
giant_component_sizes = [self.robustness_evaluation(self.create_random_map()) for i in range(N)]
mean_gcs = np.array(giant_component_sizes).mean(axis=0)
return list(mean_gcs)
def plot_robustness(self, metrics_results, step = 1, block = False):
"""
plot_robustness generates the plots for the data generated by the robustness_evaluation function.
"""
# Plot settings:
FONT = 15
fraction = [i/100 for i in range(0,100,step)]
plt.figure(figsize = (5,5))
plt.style.use('seaborn-whitegrid')
plot_metric_labels = {"Degree": ["--*", "#D81B60"] , "Betweenness centrality": ["--o", "#1E88E5"],
"Closeness centrality" : ["--+","#FFC107"],
"Eigenvector centrality": ["--^", "#004D40"],
"Random failures":["--1", "black"]}
plt.xlabel("Fraction of nodes removed", fontsize = FONT)
plt.ylabel("Largest component size", fontsize = FONT)
plt.title("Robustness of the network", fontsize = FONT)
#Plotting:
for i in metrics_results:
data, metric_name = i
data = [i/max(data) for i in data]
plt.plot(fraction, data, plot_metric_labels[metric_name][0], label= metric_name, color=plot_metric_labels[metric_name][1], linewidth = 1, markersize = 7)
plt.legend()
plt.show(block=False)
def powerlaw(self, cutoff = False):
"""
The powerlaw function fits a power law distribution to the network's degree sequence using the maximum likelihood method.
The calculations are performed with the poweRlaw R package and the fitted alpha parameter is returned as part of the output.
The fit is performed on all values of the degree sequence that are larger than or equal to the cutoff value.
If cutoff == False then the cutoff is chosen automatically by minimizing the Kolmogorov distance
between the fitted power law and the data.
"""
robjects.r('''
powerlaws <- function(degrees, cutoff = FALSE){
degrees = as.integer(degrees)
#print(degrees)
# Set powerlaw object
my_powerlaw = displ$new(degrees)
# Estimate alpha value
est = estimate_pars(my_powerlaw)
# Estimate cutoff value as the one that minimizes the Kolmogorov distance between the data and the distribution model
if (cutoff == FALSE){
est2 = estimate_xmin(my_powerlaw)
my_powerlaw$setXmin(est2)
est = estimate_pars(my_powerlaw)
my_powerlaw$setPars(est$pars)
}
else{
my_powerlaw$setXmin(cutoff)
est = estimate_pars(my_powerlaw)
my_powerlaw$setPars(est$pars)
}
# Calculate likelihood of the model
likelihood = dist_ll(my_powerlaw)
# Calculate percentage of data covered by the powerlaw
percentage = length(degrees[which(degrees>=my_powerlaw$xmin)])/length(degrees)
#print(degrees[which(degrees>=my_powerlaw$xmin)])
# Data for plotting the results
data = plot(my_powerlaw)
fit = lines(my_powerlaw)
return(list(data, fit, my_powerlaw$xmin, my_powerlaw$pars, percentage, likelihood, my_powerlaw))
#return(c(my_powerlaw$xmin, my_powerlaw$pars))
#statistical_test = bootstrap_p(m, no_of_sims = 1000, threads = 2)
#p_value = statistical_test$p
}''')
# Make R funtion available for python:
powerlaw = robjects.globalenv['powerlaws']
# Prepare the degree sequence:
degree_map = self.create_degree_distribution_map().fa
degree_map = degree_map.tolist()
# Perform calculations:
power_law_result = powerlaw(degree_map, cutoff)
plotting_data = (power_law_result[0][0], power_law_result[0][1], power_law_result[1][0], power_law_result[1][1])
kmin = power_law_result[2][0]
alpha = power_law_result[3][0]
percentage = power_law_result[4][0]
likelihood = power_law_result[5][0]
my_powerlaw = power_law_result[6]
return (kmin, alpha, percentage, likelihood, plotting_data, my_powerlaw)
def bootstrap_powerlaw(self, my_powerlaw, N=100):
"""
bootstrap_powerlaw calculates the p-value for H0: the degree sequence comes from a power law distribution with the estimated alpha and cutoff parameters;
H1: it does not. The test is performed with the bootstrap_p function from the poweRlaw package, which simulates data from the fitted distribution N times
and counts how often the distance between the theoretical and simulated distributions is larger than or equal to the one observed for the degree sequence.
"""
robjects.r('''
assess_p_value <- function(my_powerlaw, N){
statistical_test = bootstrap_p(my_powerlaw, no_of_sims = N, threads = 2)
return(statistical_test$p)
}''')
p_value = robjects.globalenv['assess_p_value']
p = p_value(my_powerlaw, N)[0]
return p
def plot_powerlaw(self, plotting_data, block = False):
"""
plot_powerlaw function visualises the power law fit and the data on the log log scale.
"""
FONT = 15
# Data preparation:
datax = plotting_data[0]
datay = plotting_data[1]
fitx = plotting_data[2]
fity = plotting_data[3]
# Plot settings:
plt.figure(figsize =(5,5))
plt.style.use('seaborn-whitegrid')
plt.xlabel("log k", fontsize = FONT)
plt.ylabel("log P(X<k)", fontsize = FONT)
plt.title("Power law fit", fontsize = FONT)
# Plotting:
plt.plot(np.log(datax), np.log(datay), "o", markersize=4, color="#1E88E5")
plt.plot(np.log(fitx), np.log(fity), linewidth = 3, color = "#FFC107")
plt.show(block = block)
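# A minimal end-to-end sketch of the class above; the file name is a
# placeholder for any .graphml or .csv network accepted by __init__.
def _my_network_demo(file_name="example_network.graphml"):
    net = My_Network(file_name)
    net.prepare_the_network()
    # Centrality histogram:
    net.plot_map_histogram(net.create_degree_distribution_map(), "Degree", block=False)
    # Mixing patterns:
    print("Degree correlation coefficient:", net.calculate_assortativity_value())
    # Robustness against targeted removal by degree:
    sizes = net.robustness_evaluation(net.create_degree_distribution_map())
    net.plot_robustness([[sizes, "Degree"]], block=False)
    # Failure cascade size distribution:
    cascade = net.cascade_all_nodes(fraction_to_fail=0.25)
    net.plot_cascade(cascade, fraction_to_fail=0.25)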
# Defining additional ipywidget that will perform data download after button hitting - DownloadButton
class DownloadButton(ipywidgets.Button):
"""
Download button with dynamic content
The content is generated using a callback when the button is clicked. It is defined as an extension of the Button class in ipywidgets (source: https://stackoverflow.com/questions/61708701/how-to-download-a-file-using-ipywidget-button).
"""
def __init__(self, filename: str, contents: Callable[[], str], **kwargs):
super(DownloadButton, self).__init__(**kwargs)
self.filename = filename
self.contents = contents
self.on_click(self.__on_click)
def __on_click(self, b):
contents: bytes = self.contents().encode('utf-8')
b64 = base64.b64encode(contents)
payload = b64.decode()
digest = hashlib.md5(contents).hexdigest() # bypass browser cache
id = f'dl_{digest}'
display(HTML(f"""
<html>
<body>
<a id="{id}" download="{self.filename}" href="data:text/csv;base64,{payload}" download>
</a>
<script>
(function download() {{
document.getElementById('{id}').click();
}})()
</script>
</body>
</html>
"""))
# Graphical User Interface:
class GUI_for_network_analysis:
def __init__(self):
# Initializing the variables and the GUI elements:
self.G = None
self.initial_info = widgets.HTML(value = "<b><font color='#555555';font size =5px;font family='Helvetica'>ETNA: Extensive Tool for Network Analysis</b>")
self.instruction_header = widgets.HTML(value = "<b><font color='#555555';font size =4px;font family='Helvetica'>Instruction:</b>")
self.instruction = widgets.HTML(value = "<b><font color='#555555';font size =2.5px;font family='Helvetica'>1. Provide a file name with a .graphml or .csv extension. <br>2. Hit the 'Prepare the network' button (parallel links and nodes outside the largest component will be removed; the network is also set as undirected). <br>3. Choose the tab of interest. <br>4. Adjust the method settings if present.<br>5. Run the method by hitting the tab's 'Run' button. The calculations will be performed and the appropriate plot will be displayed on the right.<br>6. If you want to run a new analysis for a new network hit the 'Restart ETNA' button. </b>")
self.file_name_textbox = widgets.Text(value='Provide file name here',
placeholder='Type something',
description='Network:',
disabled=False,
align_items='center',
layout=Layout(width='40%')#, height='10px')
)
self.button_graph_preparation = widgets.Button(value=False,
description='Prepare the network',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check', # (FontAwesome names without the `fa-` prefix)
layout=Layout(width='40%', height='20%'),
style= {'button_color':'#FFAAA7'}
)
self.links_nodes_number_info = widgets.Label(value="")
self.label_centrality = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Histograms of centrality measures</b>")
self.centrality_choice = widgets.Dropdown(
options=['Choose from the list','Degree', 'Betweenness centrality', 'Closeness centrality',
'Eigenvector centrality', "Clustering coefficient"],
description='Measure: ',
disabled=False,
layout=Layout(width='90%')
)
self.button_centrality = widgets.Button(value=False,
description='Run',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check', # (FontAwesome names without the `fa-` prefix)
layout=Layout(width='90%', height='20%'),
style= {'button_color':'#98DDCA'}
)
self.centrality_out = widgets.Output()
self.info_mini = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Minimum: </b>")
self.info_mini_value = widgets.Label(value = "")
self.info_maxi = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Maximum: </b>")
self.info_maxi_value = widgets.Label(value = "")
self.info_avg = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Average: </b>")
self.info_avg_value = widgets.Label(value = "")
self.info_std = widgets.HTML(value="<b><font color='black';font size =2px;font family='Helvetica'>Standard deviation: </b>")
self.info_std_value = widgets.Label(value = "")
self.button_assortativity = widgets.Button(value=False,
description='Run',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check', # (FontAwesome names without the `fa-` prefix)
layout=Layout(width='90%', height='20%'),
style= {'button_color':'#98DDCA'}
) # bold text is possible (works); add "font_weight":"bold" to the style
self.label_corr_value = widgets.Label(value = "") # was " "
self.label_ANND_plot = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Assortativity examination: Average Nearest Neighbour Degree (ANND) plot and degree correlation coefficient</b>")
self.label_ANND_plot_settings = widgets.Label(value = "ANND plot settings:")
self.ANND_plot_settings_normed = widgets.Checkbox(value=False,
description='Normed ANND',
disabled=False,
indent=False)
self.ANND_plot_settings_errorbar = widgets.Checkbox(value=False,
description='Errorbars',
disabled=False,
indent=False)
self.assortativity_out = widgets.Output()
self.hubs_impact_choice = widgets.Dropdown(
options=['Choose from the list','s1', 's2'],
description='Measure: ',
disabled=False,
layout=Layout(width='90%')
)
self.hubs_impact_button = widgets.Button(value=False,
description='Run',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check', # (FontAwesome names without the `fa-` prefix)
layout=Layout(width='90%', height='20%'),
style= {'button_color':'#98DDCA'}
)
self.label_hubs_impact = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Plots of s1 and s2</b>")
#self.label_hubs_impact_explain = widgets.Label(value = "Hubs impact examination consists of creating subnetworks.. and here insert the nice mathematical notation from the thesis")
self.hubs_impact_out = widgets.Output()
self.button_robustness = widgets.Button(value=False,
description='Run',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check', # (FontAwesome names without the `fa-` prefix)
layout=Layout(width='90%', height='20%'),
style= {'button_color':'#98DDCA'}
)
self.robustness_degree = widgets.Checkbox(value=True,
description='Degree',
disabled=False,
indent=False)
self.robustness_betweenness = widgets.Checkbox(value=False,
description='Betweennness centrality',
disabled=False,
indent=False)
self.robustness_closeness = widgets.Checkbox(value=False,
description='Closeness centrality',
disabled=False,
indent=False)
self.robustness_eigenvector = widgets.Checkbox(value=False,
description='Eigenvector centrality',
disabled=False,
indent=False)
self.robustness_random = widgets.Checkbox(value=False,
description='Random failures',
disabled=False,
indent=False)
self.label_robustness_info = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Examination of the network robustness</b>")
self.label_robustness_settings = widgets.Label(value = "Choose metrics for the network robustness examination:")
self.robustness_out = widgets.Output()
self.robustness_random_label = widgets.Label(value = "Number of Monte Carlo repetitions for random failures")
self.robustness_random_value = widgets.IntSlider(value = 10, min=0, max=1000, step=10,
description='',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True
)
self.cascade_info = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Simulation of failure cascade</b>")
self.button_cascade = widgets.Button(value=False,
description='Run',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check', # (FontAwesome names without the `fa-` prefix)
layout=Layout(width='90%', height='20%'),
style= {'button_color':'#98DDCA'}
)
self.cascade_fraction_to_fail = widgets.FloatSlider(value=0.25, min=0, max=1, step=0.05,
description='',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f')
self.cascade_fraction_to_fail_label = widgets.Label(value = "Failure fraction")
self.cascade_out = widgets.Output()
self.label_powerlaw = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Fitting power law to the degree sequence using Maximum Likelihood estimator</b>")
self.powerlaw_settings = widgets.HTML(value = "Settings:")
self.powerlaw_pvalue = widgets.Checkbox(value=False,
description='Calculate p-value',
disabled=False,
indent=False)
self.bootstrap_settings_label = widgets.Label(value = "Number of simulations for bootstrap")
self.bootstrap_settings = widgets.IntSlider(value=100, min=50, max=1000, step=50,
description='',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True
)
self.bootstrap_settings.layout.visibility = 'hidden'
self.bootstrap_settings_label.layout.visibility = 'hidden'
self.cutoff_settings = widgets.Checkbox(value=True,
description='Cutoff value according to Kolmogorov distance',
disabled=False,
indent=False)
self.cutoff_label = widgets.Label(value = "Cutoff value")
self.cutoff_label.layout.visibility = 'hidden'
self.cutoff = widgets.IntSlider(value = 1, min=1, max=100, step=1,
description='',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True
)
self.cutoff.layout.visibility = 'hidden'
self.pvalue_label = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>P-value:</b>")
self.pvalue_value = widgets.Label(value="")
self.pvalue_label.layout.visibility = 'hidden'
self.pvalue_value.layout.visibility = 'hidden'
self.powerlaw_button = widgets.Button(value=False,
description='Run',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check', # (FontAwesome names without the `fa-` prefix)
layout=Layout(width='90%', height='20%'),
style= {'button_color':'#98DDCA'}
)
self.powerlaw_out = widgets.Output()
self.restart_button = widgets.Button(value=False,
description='Restart ETNA',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check', # (FontAwesome names without the `fa-` prefix)
layout=Layout(width='40%', height='100%'),
style= {'button_color':'#FFD3B4'}
)
self.error_info = widgets.HTML(value = " ")
self.plot_label = widgets.HTML(value = "Plot and info")
self.download_button = DownloadButton(filename='data.csv', contents=lambda: f'', description='Download data')
self.download_button.layout.visibility = 'hidden'
self.download_button.layout.width = '90%'
self.download_button.style.button_color = '#D5ECC2'
self.dataframe = None
def button_graph_preparation_click(self, button):
"""
Defines what to do when the graph preparation button is clicked.
"""
self.clear()
# Error handling:
if self.file_name_textbox.value == "" or self.file_name_textbox.value == 'Provide file name here':
self.file_name_textbox.value = "No file name provided. Provide file name here."
return None
if ".graphml" not in self.file_name_textbox.value and ".csv" not in self.file_name_textbox.value:
self.file_name_textbox.value = "Incorrect file name. File must have .graphml or .csv extension."
return None
self.button_graph_preparation.description = "Preparing..."
self.error_info.value = " "
# Graph upload from the file:
self.G = My_Network(self.file_name_textbox.value)
# Graph preparation - removal of the parallel edges, non-connected components etc.:
self.G.prepare_the_network()
self.button_graph_preparation.description = "Network is ready! Now choose the tool below."
self.button_graph_preparation.style.button_color = '#D5ECC2'
self.links_nodes_number_info.value = "Number of nodes: "+str(self.G.G.num_vertices())+", Number of links: " + str(self.G.G.num_edges())
def centrality_button_click(self, b):
"""
Binds the centrality measure button from the centrality tab with the appropriate map (1), plot generation (2) and statistics calculations (3).
"""
self.clear()
with self.centrality_out:
if self.centrality_choice.value == "Choose from the list":
pass
else:
# 1):
if self.error() == True:
return None
else:
centrality_choices_functions = {'Degree':self.G.create_degree_distribution_map,
'Betweenness centrality':self.G.create_betweenness_distribution_map,
'Closeness centrality': self.G.create_closeness_distribution_map,
'Eigenvector centrality':self.G.create_eigenvector_distribution_map,
"Clustering coefficient": self.G.create_clustering_map}
my_map = centrality_choices_functions[self.centrality_choice.value]()
fig, ax = self.G.plot_map_histogram(my_map, self.centrality_choice.value) # 2)
self.retrieve_data(my_map, "Centrality and clustering")
my_map = list(my_map.fa)
# 3)
self.info_mini_value.value = str(min(my_map))
self.info_maxi_value.value = str(max(my_map))
self.info_avg_value.value = str(round(np.mean(my_map),4))
self.info_std_value.value = str(round(np.std(my_map),4))
self.info_mini = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Minimum: </b>")
self.info_maxi = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Maximum: </b>")
self.info_avg = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Average: </b>")
self.info_std = widgets.HTML(value="<b><font color='black';font size =2px;font family='Helvetica'>Standard deviation: </b>")
display(VBox(children = [
HBox(children= [self.info_mini, self.info_mini_value]),
HBox(children= [self.info_maxi, self.info_maxi_value]),
HBox(children= [self.info_avg, self.info_avg_value]),
HBox(children= [self.info_std, self.info_std_value])
]))
def assortativity_button_click(self, b):
"""
Binds the assortativity button with the ANND plot generation (1) and degree correlation calculations (2).
"""
self.clear()
if self.error() == True:
return None
else:
corr_value = round(self.G.calculate_assortativity_value(),3)
corr_meaning = "assortative" if corr_value>0 else "disassortative"
self.label_corr_value.value = "Degree correlation coefficient equals " + str(corr_value)+". Graph has "+ corr_meaning +' mixing patterns with regards to the degree.' # 2
with self.assortativity_out:
self.assortativity_out.clear_output()
self.G.plot_ANND(normed = self.ANND_plot_settings_normed.value, errorbar = self.ANND_plot_settings_errorbar.value, block = False) # 1
def hubs_impact_choice_plot(self, b):
"""
Binds the hubs impact button with the hubs impact plot generation. Data is first calculated by calling the hubs_impact_check function (1) and then plotted (2).
"""
self.clear()
with self.hubs_impact_out:
if self.hubs_impact_choice.value == "Choose from the list":
pass
else:
if self.error() == True:
return None
else:
if self.hubs_impact_choice.value == "s1":
Ns, Es, degrees_set = self.G.hubs_impact_check() # 1
self.G.plot_hubs_impact1(degrees_set, Es, block = False) # 2
if self.hubs_impact_choice.value == "s2":
Ns, Es, degrees_set = self.G.hubs_impact_check() # 1
self.G.plot_hubs_impact2(degrees_set, Es, Ns, block = False) # 2
def cascade_button_click(self, b):
"""
Binds the cascade button with the failure cascade simulation run (1), plotting (2) and the statistics calculations (3).
"""
self.clear()
if self.error() == True:
return None
else:
# Button settings:
self.button_cascade.style.button_color = '#FFAAA7'
self.button_cascade.description = "Running..."
# Data generation:
cascade_data = self.G.cascade_all_nodes(fraction_to_fail = self.cascade_fraction_to_fail.value) # 1)
self.retrieve_data(cascade_data, "Cascade")
with self.cascade_out:
self.cascade_out.clear_output()
self.G.plot_cascade(cascade_data, fraction_to_fail = self.cascade_fraction_to_fail.value) # 2)
# 3):
self.info_mini_value.value = str(min(cascade_data.values()))
self.info_maxi_value.value = str(max(cascade_data.values()))
self.info_avg_value.value = str(round(np.mean(list(cascade_data.values())),4))
self.info_std_value.value = str(round(np.std(list(cascade_data.values())),4))
self.info_mini = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Minimum: </b>")
self.info_maxi = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Maximum: </b>")
self.info_avg = widgets.HTML(value = "<b><font color='black';font size =2px;font family='Helvetica'>Average: </b>")
self.info_std = widgets.HTML(value="<b><font color='black';font size =2px;font family='Helvetica'>Standard deviation: </b>")
display(VBox(children = [
HBox(children= [self.info_mini, self.info_mini_value]),
HBox(children= [self.info_maxi, self.info_maxi_value]),
HBox(children= [self.info_avg, self.info_avg_value]),
HBox(children= [self.info_std, self.info_std_value])
]))
self.button_cascade.description = "Run failure cascade simulation"
self.button_cascade.style.button_color = '#98DDCA'
def robustness_button_click(self, b):
"""
Binds the robustness button with the robustness examination.
In the call the data is generated (1) and then plotted (2).
"""
self.clear()
if self.error() == True:
return None
else:
self.button_robustness.style.button_color = '#FFAAA7'
self.button_robustness.description = "Running..."
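# Map each selected robustness checkbox to the corresponding map-generation function and plot label;
# random failures are handled separately below because they use the Monte Carlo repetition count.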
metrics_to_run = {self.robustness_degree:[self.G.create_degree_distribution_map, "Degree"],
self.robustness_betweenness:[self.G.create_betweenness_distribution_map, "Betweenness centrality"] ,
self.robustness_closeness:[self.G.create_closeness_distribution_map, 'Closeness centrality'],
self.robustness_eigenvector:[self.G.create_eigenvector_distribution_map,'Eigenvector centrality'],
self.robustness_random:[]}
results_to_plot = []
for metric in metrics_to_run.keys():
if metric.value == True:
if metric == self.robustness_random:
results = self.G.robustness_random_evaluation(N=self.robustness_random_value.value)
results_to_plot.append([results, "Random failures"])
else:
[function, metric_name] = metrics_to_run[metric]
map_G = function()
results = self.G.robustness_evaluation(map_G) # 1
results_to_plot.append([results, metric_name])
self.retrieve_data(results_to_plot, "Robustness")
with self.robustness_out:
self.robustness_out.clear_output()
self.G.plot_robustness(results_to_plot, block=True) # 2
self.button_robustness.description = "Run"
self.button_robustness.style.button_color = '#98DDCA'
def robustness_random_true(self, b):
"""
Function for handling the robustness settings for random failures.
It makes visible the bar for adjusting the number of Monte Carlo repetitions when the random failures measurement is chosen.
"""
if self.robustness_random.value == True:
self.robustness_random_label.layout.visibility = 'visible'
self.robustness_random_value.layout.visibility = 'visible'
else:
self.robustness_random_label.layout.visibility = 'hidden'
self.robustness_random_value.layout.visibility = 'hidden'
def powerlaw_button_click(self, b):
"""
Binds the powerlaw button with the power-law fit to the degree sequence. The parameters are calculated (1), the fit is plotted (2) and the statistics are calculated (3).
"""
self.clear()
if self.error() == True:
return None
else:
pvalue = "Not calculated"
self.powerlaw_button.description = "Running..."
self.powerlaw_button.style.button_color = '#FFAAA7'
cutoff = self.cutoff.value if self.cutoff_settings.value == False else False
(kmin, alpha, percentage, likelihood, plotting_data, my_powerlaw) = self.G.powerlaw(cutoff) # 1)
if self.powerlaw_pvalue.value == True:
# calculate also p-value
N = self.bootstrap_settings.value
pvalue = self.G.bootstrap_powerlaw(my_powerlaw, N)
pvalue = str(round(pvalue, 4))
self.pvalue_label.layout.visibility = 'visible'
self.pvalue_value.layout.visibility = 'visible'
with self.powerlaw_out:
self.powerlaw_out.clear_output()
self.G.plot_powerlaw(plotting_data, block = True) # 2)
# 3:
self.info_mini.value = "<b><font color='black';font size =2px;font family='Helvetica'>Cutoff: </b>"
self.info_mini_value.value = str(kmin)
self.info_maxi.value = "<b><font color='black';font size =2px;font family='Helvetica'>Power law parameter alpha: </b>"
self.info_maxi_value.value = str(round(alpha,4))
if alpha>3 or alpha<2:
self.info_maxi_value.value+= ", ANOMALOUS REGIME!, standard: 2<alpha<3"
self.info_avg.value = "<b><font color='black';font size =2px;font family='Helvetica'>Percentage of data covered: </b>"
self.info_avg_value.value = str(round(percentage*100,4))
self.info_std.value = "<b><font color='black';font size =2px;font family='Helvetica'>Likelihood: </b>"
self.info_std_value.value = str(round(likelihood,4))
self.pvalue_value.value = pvalue
display(VBox(children = [
HBox(children= [self.info_mini, self.info_mini_value]),
HBox(children= [self.info_maxi, self.info_maxi_value]),
HBox(children= [self.info_std, self.info_std_value]),
HBox(children= [self.info_avg, self.info_avg_value]),
HBox(children= [self.pvalue_label, self.pvalue_value])
]))
self.powerlaw_button.description = "Run"
self.powerlaw_button.style.button_color = '#98DDCA'
def powerlaw_pvalue_true(self, b):
"""
Function for handling the powerlaw settings. It makes visible the bootstrap settings if the p-value is to be assessed (p-value checkbox is True).
"""
if self.powerlaw_pvalue.value == True:
self.bootstrap_settings.layout.visibility = 'visible'
self.bootstrap_settings_label.layout.visibility = "visible"
else:
self.bootstrap_settings.layout.visibility = 'hidden'
self.bootstrap_settings_label.layout.visibility = "hidden"
def powerlaw_cutoff(self, b):
"""
Function for handling the powerlaw settings. It makes visible the cutoff choice bar if the default option for cutoff adjustment using the Kolmogorov distance is not chosen.
"""
if self.cutoff_settings.value == False:
self.cutoff_label.layout.visibility = "visible"
self.cutoff.layout.visibility = 'visible'
if self.error(return_message = False) == True:
return None
else:
degree_values = self.G.create_degree_distribution_map().fa
self.cutoff.min = min(degree_values)
self.cutoff.max = max(degree_values)
self.cutoff.value = self.cutoff.min
else:
self.cutoff_label.layout.visibility = "hidden"
self.cutoff.layout.visibility = 'hidden'
def display(self):
"""
Displays all the elements of the GUI in the appropriate order to form the interface.
"""
display(self.initial_info)
display(self.instruction_header)
display(self.instruction)
preparation = VBox(children = [self.file_name_textbox, self.button_graph_preparation, self.links_nodes_number_info], layout = Layout(width = "100%"))
display(preparation)
tabs_preparation = self.tabs
outs = VBox(children = [self.centrality_out, self.hubs_impact_out,
self.assortativity_out, self.label_corr_value,
self.robustness_out, self.cascade_out, self.powerlaw_out,
self.download_button
]) # self.clustering_out
all = HBox(children = [tabs_preparation, outs])
display(all)
display(self.error_info)
display(self.restart_button)
def bind(self):
"""
Binds buttons and other interactivities with the corresponding action functions.
"""
# Bind prepare graph button with the preparation function:
self.button_graph_preparation.on_click(self.button_graph_preparation_click)
# Bind centrality choice button with the centrality examination and centrality tab
self.button_centrality.on_click(self.centrality_button_click)
self.tab_centrality = VBox(children=[self.label_centrality, self.centrality_choice, self.button_centrality])
# Bind hubs_impact button with the plot generation and hubs_impact tab
self.hubs_impact_button.on_click(self.hubs_impact_choice_plot)
self.tab_hubs_impact = VBox(children=[self.label_hubs_impact, self.hubs_impact_choice, self.hubs_impact_button])
# Bind assortativity button with the assortativity examination and assortativity tab
self.button_assortativity.on_click(self.assortativity_button_click)
self.tab_assortativity = VBox(children=[self.label_ANND_plot, self.label_ANND_plot_settings,
self.ANND_plot_settings_errorbar, self.ANND_plot_settings_normed, self.button_assortativity
])
# Bind robustness button with the robustness examination and robustness tab
self.robustness_random_results = interactive_output(self.robustness_random_true, {"b":self.robustness_random}) #interactive_output(self.robustness_random, {"b":self.robustness_random_true})
self.button_robustness.on_click(self.robustness_button_click)
self.robustness = VBox(children=[self.label_robustness_info, self.label_robustness_settings, self.robustness_degree, self.robustness_betweenness,
self.robustness_closeness,
self.robustness_eigenvector,
self.robustness_random,
self.robustness_random_results,
self.robustness_random_label,
self.robustness_random_value,
self.button_robustness])
# Bind cascade button with the failure cascade examination and cascade tab
self.button_cascade.on_click(self.cascade_button_click)
self.tab_cascade = VBox(children=[self.cascade_info, HBox(children = [self.cascade_fraction_to_fail_label, self.cascade_fraction_to_fail]),
self.button_cascade])
# Bind powerlaw button with the powerlaw examination, bind powerlaw settings with the corresponding actions, add all to the powerlaw tab
self.powerlaw_button.on_click(self.powerlaw_button_click)
self.powerlaw_bootstrap = interactive_output(self.powerlaw_pvalue_true, {'b':self.powerlaw_pvalue})
self.powerlaw_cutoff = interactive_output(self.powerlaw_cutoff, {'b':self.cutoff_settings})
self.tab_powerlaw = VBox(children = [self.label_powerlaw, self.powerlaw_settings, self.powerlaw_pvalue,
self.powerlaw_bootstrap,
self.bootstrap_settings_label, self.bootstrap_settings,
self.powerlaw_cutoff, self.cutoff_settings, self.cutoff_label,
self.cutoff,
self.powerlaw_button])
# Joining tabs in the GUI
self.tabs = widgets.Accordion(children = [self.tab_centrality, self.tab_powerlaw,
self.tab_hubs_impact, self.tab_assortativity, self.robustness, self.tab_cascade],
layout=Layout(width='40%', min_width = "300px",
), selected_index = None) # self.tab_clustering used to be here,
#layout in_height='500px',max_height='500px', display='flex'align_items='stretch'
# Additional tabs' settings
self.tabs.set_title(0, '> Centrality and clusterization ')
self.tabs.set_title(1, '> Power law fitting')
self.tabs.set_title(2, '> Subnetworks: s1 and s2')
self.tabs.set_title(3, '> Assortativity')
self.tabs.set_title(4, '> Robustness')
self.tabs.set_title(5, '> Failure cascade')
# Bind restart button with the restart function
self.restart_button.on_click(self.gui_restart)
def gui_restart(self,b):
"""
Resets everything to the initial state by clearing the output widgets and restoring the original colors and texts of the labels and buttons.
"""
self.G = None
self.file_name_textbox.value = "Provide file name here"
self.button_graph_preparation.description = "Prepare the graph"
self.button_graph_preparation.style.button_color = "#FFAAA7"
self.links_nodes_number_info.value = ""
self.centrality_choice.value = "Choose from the list"
self.centrality_out.clear_output()
#self.clustering_out.clear_output()
self.hubs_impact_choice.value = "Choose from the list"
self.hubs_impact_out.clear_output()
self.label_corr_value.value = ""
self.ANND_plot_settings_normed.value = False
self.ANND_plot_settings_errorbar.value = False
self.assortativity_out.clear_output()
self.cascade_fraction_to_fail.value = 0.25
self.cascade_out.clear_output()
self.robustness_degree.value = False
self.robustness_betweenness.value = False
self.robustness_closeness.value = False
self.robustness_eigenvector.value = False
self.robustness_random.value = False
self.robustness_out.clear_output()
self.powerlaw_pvalue.value = False
self.cutoff_settings.value = True
self.powerlaw_out.clear_output()
#self.data_preview.clear_output()
#self.data_preview_button.layout.visibility = 'hidden'
self.download_button.layout.visibility = 'hidden'
def error(self, return_message = True):
"""
Used for error handling - checks whether the file is provided in the appropriate format. This function is always called before running any of the methods in the GUI.
"""
if self.G == None or self.file_name_textbox.value == "No file name provided. Provide file name here." or self.file_name_textbox.value == "":
if return_message==True:
self.error_info.value = "<b><font color='#FFAAA7';font size =3px;font family='Helvetica'>Cannot use the method. Provide file name and prepare the network first.</b>"
return True
def clear(self):
"""
Clears the outputs. Used to make previous plots and statistics disappear from the GUI when the new method is called.
This function is always called before running any of the methods in the GUI.
"""
self.centrality_out.clear_output()
self.hubs_impact_out.clear_output()
self.assortativity_out.clear_output()
self.robustness_out.clear_output()
#self.clustering_out.clear_output()
self.cascade_out.clear_output()
self.powerlaw_out.clear_output()
self.label_corr_value.value = ""
#self.data_preview.clear_output()
#self.data_preview_button.layout.visibility = 'hidden'
self.download_button.layout.visibility = 'hidden'
def retrieve_data(self, data, method):
"""
Used to gather the data from the method functions so that it is downloadable.
Called in 3 cases - when the robustness, cascade or Centrality and clustering methods are chosen.
"""
if method == "Centrality and clustering":
my_map = data
my_map_values = my_map.a[self.G.G.get_vertices()]
nodes = self.G.G.get_vertices()
self.dataframe = pd.DataFrame({"NodeIndex":nodes, "MeasureValue": my_map_values})
#self.data_preview_button.layout.visibility = 'visible'
self.download_button.layout.visibility = 'visible'
self.dataframe = self.dataframe.to_csv()
self.download_button.contents = lambda: self.dataframe
if method == "Robustness":
results_to_plot = data
dataframe = {}
for row in results_to_plot:
dataframe[row[1]] = row[0]
self.dataframe = pd.DataFrame(dataframe)
|
import os
import pytz
from collections import namedtuple
from datetime import datetime, timedelta
import requests
from dotenv import load_dotenv
import pandas as pd
from tensorflow.keras.models import load_model
import numpy as np
# Named tuple for aid in the data parse
fields = ['date', 'open', 'close', 'high', 'low', 'vols']
TickerData = namedtuple('TickerData', fields)
def last_close():
est = pytz.timezone('US/Eastern')
utc = pytz.utc
# TIME_FORMAT = '%H:%M:%S'
# DATE_FORMAT = '%Y-%m-%d'
est_time_now = datetime.now(tz=utc).astimezone(est)
est_date = est_time_now.replace(hour=0, minute=0, second=0, microsecond=0)
market_open = est_date + timedelta(hours=9.5)
market_close = est_date + timedelta(hours=16)
if est_time_now > market_open and est_time_now < market_close:
# print('Stock Market Is Open')
last_record_date = est_date + timedelta(days=-1)
else:
# print('Stock Market Is Closed')
if est_time_now < market_open:
last_record_date = est_date + timedelta(days=-1)
else:
last_record_date = est_date
return last_record_date
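# A minimal usage sketch of last_close() (illustrative only; it relies on the imports above,
# and _example_last_close is a hypothetical helper, not part of the original module).
# During market hours, and before the open, the function returns the previous day's date;
# after the close it returns the current day, so downstream requests never ask for an
# unfinished trading session.
def _example_last_close():
    last_record = last_close()
    print(last_record.strftime('%Y-%m-%d'))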
def get_stock_data(stock_symbol, start_date, end_date):
project_dir = os.getcwd()
env_file = os.path.join(project_dir, '.env')
load_dotenv(dotenv_path=env_file,verbose=True)
TIINGO_API_KEY = os.getenv("TIINGO_API_KEY")
assert TIINGO_API_KEY
"""
Make a REST API call to the Tiingo API to get historic stock data
Parameters
----------
stock_symbol : str
US stock market symbol
start_date : str
yyyy-mm-dd formated date that begins time series
end_date : str
yyyy-mm-dd formated date that ends the time series
returns
-------
response : request.response
The response object to be parsed
"""
base_url = f'https://api.tiingo.com/tiingo/daily/{stock_symbol}/prices?'
payload = {
'token':TIINGO_API_KEY,
'startDate':start_date,
'endDate':end_date
}
response = requests.get(base_url, params=payload)
return response
def parse_json(response):
"""
Parameters
----------
response : requests.response object
The response object to be parsed
Returns
-------
records : list
list of named tuples that represent the ticker data
"""
json_response = response.json()
records = []
for json_object in json_response:
d = json_object['date']
o = json_object['open']
c = json_object['close']
h = json_object['high']
l = json_object['low']
v = json_object['volume']
ticker_data = TickerData(d, o, c, h, l, v)
records.append(ticker_data)
return records
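# A sketch of the expected shape of the Tiingo payload handled above (the field names follow
# the parser; the numbers are made up for illustration):
#   [{"date": "2020-01-02T00:00:00.000Z", "open": 323.5, "close": 324.9,
#     "high": 325.8, "low": 322.3, "volume": 59151200}, ...]
# parse_json(response) then returns
#   [TickerData(date='2020-01-02T00:00:00.000Z', open=323.5, close=324.9,
#               high=325.8, low=322.3, vols=59151200), ...]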
def model_path(debug=False):
project_dir = os.getcwd()
models_dir = os.path.join(project_dir,'models')
model_path = os.path.join(models_dir,'lstm_forecast.h5')
if debug:
print(model_path)
try:
assert os.path.exists(model_path)
except AssertionError as e:
print('----'*20)
print('INVALID FILE PATH FOR MODEL ---> {}'.format(model_path))
print('----'*20)
model_path = None
return model_path
def market_predict():
est = pytz.timezone('US/Eastern')
ticker = 'SPY'
end_date = last_close().astimezone(est)
start_date = end_date + timedelta(days=-175)
# print(start_date.strftime(r'%Y-%m-%d'))
# print(end_date.strftime(r'%Y-%m-%d'))
response = get_stock_data(
ticker,
start_date.strftime(r'%Y-%m-%d'),
end_date.strftime(r'%Y-%m-%d'))
records = parse_json(response)
df = pd.DataFrame(records)
# ---------------Fix the date to be UTC equivalent of EST Stock Market Close
utc = pytz.utc
est = pytz.timezone('US/Eastern')
date_format='%Y-%m-%d'
# Convert datestring to datetime tz-naive
df['date'] = pd.to_datetime(df['date'], format=date_format, exact=False).dt.tz_localize(None)
# add 16 hours to tz-naive datetime
df['date'] = df['date'] + pd.DateOffset(hours=16)
# localize 1600 to est timezone
df['date'] = df['date'].dt.tz_localize(est)
# convert EDT to UTC time
df['date'] = df['date'].dt.tz_convert(utc)
# ---------------------------------------------------------------------------
df.set_index('date', inplace=True)
df['vols_adj'] = np.log(df['vols'])*10
df['pred_close'] =np.nan
# select features to feed into prediction
features = ['close', 'open', 'high', 'low','vols_adj']
# Select most recent 120 trading days
df_feature_predict = df.iloc[-120:, :]
df_feature_predict.reset_index(inplace=True)
# 1x60 array
df_feature_predict = df_feature_predict[features]
dataset = df_feature_predict.values
# normalization values for close, open, high, low, vols_adj during training of the model
# if the model is retrained, these numbers will need to be updated
data_mean = [292.17135583, 292.21083333, 293.55247083, 290.60442333, 180.37830249]
data_std = [6.18969688, 6.25379573, 5.93148516, 6.41236015, 3.51395123]
dataset_norm = (dataset - data_mean)/data_std
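# Reshape the normalized 120x5 window to (batch=1, timesteps=120, features=5),
# the 3-D input shape the saved LSTM model expects.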
X_test = dataset_norm.reshape(1, 120, 5)
# print('Loading Keras Model: {}'.format(model_path()))
model = load_model(model_path())
pred = model.predict(X_test)
assert pred.shape == (1,60)
# prediction data
pred_denormalized = pred * data_std[0] + data_mean[0]
# prepare the date index to associate with the prediction data
_, days = pred.shape
dates = []
for i in range(days):
date = last_close() + timedelta(days=i+1)
dates.append(date)
# prepare an empty array that will hold prediction data
x=np.zeros((60,6))
x.fill(np.nan)
# place pred close values in the last column of numpy array
x[:,5]=pred_denormalized
# convert numpy array to a dataframe
df1=pd.DataFrame(x)
df1.columns = ['close', 'open', 'high', 'low', 'vols','pred_close']
# print('df1\n',df1.head())
# create the dates dataframe that will be associated with the prediction
idx1=pd.DataFrame(dates)
idx1.columns = ['date']
# print('idx1\n',idx1.head())
# concatenate the dates with the prediction values
df2 = pd.concat([idx1, df1], axis = 1)
df2.set_index('date', inplace=True)
# print('df\n', df.head())
# print('df2\n', df2.head())
# combine data from the 3rd-party API with prediction data, ordered by datetime index
df3 = pd.concat([df, df2], sort=True)
# print('df3\n',df3.head())
# reset index, provide access to date column for processing
df3.reset_index(inplace=True)
# print('df3\n',df3.head())
# process date column to seconds from epoch
df4 = dt_to_epoch(df3)
# print('df4\n',df4.head())
# select data for return
df5 = df4[['date','close','pred_close']]
# print('df5\n',df5.head())
return df5.to_json(orient='records')
def market_data():
est = pytz.timezone('US/Eastern')
ticker = 'SPY'
end_date = last_close().astimezone(est)
start_date = end_date + timedelta(days=-300)
# print(start_date.strftime(r'%Y-%m-%d'))
# print(end_date.strftime(r'%Y-%m-%d'))
response = get_stock_data(
ticker,
start_date.strftime(r'%Y-%m-%d'),
end_date.strftime(r'%Y-%m-%d'))
records = parse_json(response)
df = pd.DataFrame(records)
# ---------------Fix the date to be UTC equivalent of EST Stock Market Close
utc = pytz.utc
est = pytz.timezone('US/Eastern')
date_format='%Y-%m-%d'
# Convert datestring to datetime tz-naive
df['date'] = pd.to_datetime(df['date'], format=date_format, exact=False).dt.tz_localize(None)
# add 16 hours to tz-naive datetime
df['date'] = df['date'] + pd.DateOffset(hours=16)
# localize 1600 to est timezone
df['date'] = df['date'].dt.tz_localize(est)
# convert EDT to UTC time
df['date'] = df['date'].dt.tz_convert(utc)
# ---------------------------------------------------------------------------
df2 = dt_to_epoch(df)
return df2[['date','close']].to_json(orient='records')
def dt_to_epoch(df):
est = pytz.timezone('US/Eastern')
utc = pytz.utc
x = df['date'].astype('str').to_list()
date_rows = []
for row in x:
'''Pull out only the YYYYMMDD part of the timestamp'''
date_rows.append(row[:10])
df_date = pd.DataFrame(date_rows)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import json
import numpy as np
from IPython import embed
import os
from collections import OrderedDict
import pandas as pd
from warnings import warn
def sigm_tf(x):
return 1./(1 + np.exp(-1 * x))
#def sigm(x):
# return 2./(1 + np.exp(-2 * x)) - 1
def flatten(l):
return [item for sublist in l for item in sublist]
class QuaLiKizMultiNN():
def __init__(self, nns):
self._nns = nns
feature_names = nns[0]._feature_names
for nn in self._nns:
if len(nn._target_names) == 1:
name = nn._target_names[0]
else:
raise NotImplementedError('Multitarget not implemented yet')
if np.all(nn._feature_names.ne(feature_names)):
raise Exception('Supplied NNs have different feature names')
if np.any(self._feature_min > self._feature_max):
raise Exception('Feature min > feature max')
self._target_min = pd.concat(
[nn._target_min for nn in self._nns])
self._target_max = pd.concat(
[nn._target_max for nn in self._nns])
@property
def _target_names(self):
targets = []
for nn in self._nns:
targets.extend(list(nn._target_names))
return targets
def get_output(self, input, output_pandas=True, clip_low=True, clip_high=True, low_bound=None, high_bound=None, **kwargs):
results = pd.DataFrame()
|
from abc import abstractmethod
from itertools import chain
from pathlib import Path
from typing import List, Union, Dict, Callable
import pandas as pd
import json
class AdapterError(Exception):
pass
class NoValidSourcesError(AdapterError):
pass
class BaseTransformer:
"""
The base transformer class. Should not be instantiated directly.
"""
# TODO: init should take the configuration kwargs
def __init__(self, transpose: bool = False, concat_on_axis: Union[int, str] = None,
columns: List[Union[str, int]] = None, skip_errors: bool = False,
rename: Union[Callable, Dict[str, str]] = None, **kwargs):
""" Initialize the transformer.
:param transpose: whether to transpose the resulting matrix.
:param concat_on_axis: whether to concatenate data along some axis.
:param columns: column names.
:param skip_errors: whether to skip input files if an error is encountered.
:param rename: a dict or function suitable for passing to the Pandas rename function.
:param kwargs: optional keyword arguments to pass to reader.
"""
self.transpose = transpose
self.concat_on_axis = concat_on_axis
self.columns = columns
self.skip_errors = skip_errors
self.rename = rename
self.passed_kwargs = kwargs
@abstractmethod
def transform(self, source_files: List[Path]) -> pd.DataFrame:
""" Run the actual transformation.
:param source_files: the source files containing the data.
:return: a data frame.
"""
raise NotImplementedError
@abstractmethod
def _build_data_frame(self, source_files: List[Path]) -> pd.DataFrame:
Construct a data frame from the list of input files.
:param source_files: the source files containing the data.
:return: a data frame.
"""
raise NotImplementedError
class DelimitedTableTransformer(BaseTransformer):
"""
A transformer that changes the input data into a delimited table.
"""
def __init__(self, transpose: bool = False, concat_on_axis: Union[str, int] = None,
columns: List[Union[str, int]] = None, skip_errors: bool = False,
rename: Union[Callable, Dict[str, str]] = None, **kwargs):
""" Initialize the transformer.
:param transpose: whether to transpose the resulting data.
:param concat_on_axis: whether to concatenate the data along an axis.
:param columns: list of column names.
:param skip_errors: whether to skip errors.
:param rename: a dict or function suitable for passing to the Pandas rename function.
:param kwargs: keyword arguments to be passed to the reader.
"""
super(DelimitedTableTransformer, self).__init__(
transpose, concat_on_axis, columns, skip_errors, rename, **kwargs)
self.reader_kwargs = {
'comment': None,
'names': None,
'delimiter': None,
'header': 'infer',
'dtype': None,
'index_col': None,
'parse_dates': None,
'skiprows': None,
'iterator': True,
'chunksize': 50000
}
self.reader_kwargs.update(self.passed_kwargs)
def _build_data_frame(self, source_files: List[Path]):
""" Build a data frame from a list of source files. All kwargs set at initialization are passed
to the CSV reader.
:param source_files: a list of source files to read data from.
:return: a Pandas data frame.
"""
data_frames = [pd.read_csv(source_file, **self.reader_kwargs) for source_file in source_files]
# for the special case where every file is a column. this assumes all data can fit into memory
# TODO: replace this with dask stuff so that things can be lazily concatenated
if self.concat_on_axis:
df = pd.concat(data_frames, axis=self.concat_on_axis)
yield df
else:
df_chain = chain(*data_frames)
for chunk in df_chain:
if self.transpose:
yield chunk.transpose()
else:
yield chunk
def transform(self, source_files: List[Path]) -> pd.DataFrame:
""" Transform the data contained in the list of source files to something else. By default
simply returns the data frame consisting of the raw data.
:param source_files: a list of source files.
:return: a Pandas data frame.
"""
for df in self._build_data_frame(source_files):
yield df
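# A minimal usage sketch for DelimitedTableTransformer (the file paths are placeholders,
# not part of this project, and _example_delimited_usage is a hypothetical helper):
def _example_delimited_usage():
    transformer = DelimitedTableTransformer(delimiter=',', chunksize=10000)
    sources = [Path('data/part1.csv'), Path('data/part2.csv')]
    for chunk in transformer.transform(sources):
        # each yielded item is a pandas DataFrame chunk of at most `chunksize` rows
        print(chunk.shape)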
class JsonTableTransformer(BaseTransformer):
def __init__(self, record_path: Union[List[str], str] = None,
transpose: bool = False,
concat_on_axis: Union[str, int] = None,
columns: List[Union[str, int]] = None,
skip_errors: bool = False,
rename: Union[Callable, Dict[str, str]] = None,
**kwargs):
super(JsonTableTransformer, self).__init__(
transpose, concat_on_axis, columns, skip_errors, rename, **kwargs)
self.record_path = record_path
self.reader_kwargs = {
'orient': None,
'typ': 'frame',
'dtype': None,
'convert_axes': None,
'convert_dates': True,
'keep_default_dates': True,
'precise_float': False,
'date_unit': None,
'encoding': None,
'lines': False,
'chunksize': None,
'compression': 'infer',
'nrows': None,
'storage_options': None
}
self.reader_kwargs.update(self.passed_kwargs)
@staticmethod
def _extract_data(filename: Union[Path, str], record_path: Union[List[str], str],
serialize: bool = True) -> Union[dict, list, str]:
with open(filename, 'r') as f:
data: dict = json.load(f)
if type(record_path) is str:
if serialize:
return json.dumps(data[record_path])
else:
return data[record_path]
elif type(record_path) is list:
for item in record_path:
data = data[item]
if serialize:
return json.dumps(data)
else:
return data
else:
raise TypeError('record_path must be a list or a string')
def _build_data_frame(self, source_files: List[Path]) -> pd.DataFrame:
# we're assuming any single json file can fit into memory here because we need to be able to
# access its internals to extract data from it
for source_file in source_files:
try:
if not self.record_path:
df = pd.read_json(source_file, **self.reader_kwargs)
df._source_file = source_file
else:
data = self._extract_data(source_file, self.record_path)
df = pd.read_json(data, **self.reader_kwargs)
df._source_file = source_file
yield df.transpose() if self.transpose else df
except Exception as ex:
if self.skip_errors:
print(f'skipping {source_file} due to error: {ex}')
yield pd.DataFrame()
|
import itertools
import numpy as np
import pandas as pd
try:
from ortools.graph import pywrapgraph
except ModuleNotFoundError:
print('Could not import ortools')
# import networkx as nx
from .loading import subset2vec, vec2subset, compressSubsets
__all__ = ['DenseICSDist',
'pwICSDist',
'decomposeDist',
'getDecomposed']
"""Formulating polyfunctionality distance as a min cost flow problem"""
def pwICSDist(cdf, magCol='pctpos', cyCol='cytokine', indexCols=['ptid', 'visitday', 'tcellsub', 'antigen'], factor=100000):
"""Compute all pairwise ICS distances among samples indicated by index columns.
Parameters
----------
cdf : pd.DataFrame
Contains one row per cell population, many rows per sample.
magCol : str
Column containing the magnitude which should add up to 1 for all rows in a sample
cyCol : str
Column containing the marker combination for the row. E.g. IFNg+IL2-TNFa+
indexCols : list
List of columns that make each sample uniquely identifiable
factor : int
Since cost-flow estimates are based on integers, its effectively the number of
decimal places to be accurate to. Default 1e5 means magCol is multiplied by 1e5 before rounding to int.
Returns
-------
dmatDf : pd.DataFrame
Symmetric pairwise distance matrix with hierarchical columns/index of indexCols"""
cdf = cdf.set_index(indexCols + [cyCol])[magCol].unstack(indexCols).fillna(0)
n = cdf.shape[1]
dmat = np.zeros((n, n))
tab = []
for i in range(n):
for j in range(n):
if i <= j:
d = DenseICSDist(cdf.iloc[:,i], cdf.iloc[:,j], factor=factor)
dmat[i, j] = d
dmat[j, i] = d
dmatDf = pd.DataFrame(dmat, columns=cdf.columns, index=cdf.columns)
return dmatDf
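# A minimal sketch of the longform input pwICSDist expects (toy frequencies that each sum
# to 1; column names match the defaults above, and _example_pwICSDist is a hypothetical helper):
def _example_pwICSDist():
    toy = pd.DataFrame({'ptid': ['P1'] * 4 + ['P2'] * 4,
                        'visitday': [0] * 8,
                        'tcellsub': ['CD4'] * 8,
                        'antigen': ['ENV'] * 8,
                        'cytokine': ['IFNg-IL2-', 'IFNg+IL2-', 'IFNg-IL2+', 'IFNg+IL2+'] * 2,
                        'pctpos': [0.90, 0.05, 0.03, 0.02,
                                   0.85, 0.05, 0.05, 0.05]})
    # returns a symmetric 2 x 2 distance matrix indexed by (ptid, visitday, tcellsub, antigen)
    return pwICSDist(toy)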
def DenseICSDist(freq1, freq2, factor=100000, verbose=False, tabulate=False):
"""Compute a positive, symetric distance between two frequency distributions,
where each node of the distribution can be related to every other node based
on marker combination (e.g. IFNg+IL2-TNFa-). Uses a cost-flow optimization
approach to finding the minimum dist/cost to move probability density from
one node (marker combination) to another, to have the effect of turning freq1
into freq2.
Parameters
----------
freq1, freq2 : pd.Series
Frequency distribution that should sum to one, with identical indices
containing all marker combinations
factor : int
Since cost-flow estimates are based on integers, its effectively the number of
decimal places to be accurate to. Default 1e5 means magCol is multiplied by 1e5 before rounding to int.
verbose : bool
Print all cost-flow arcs. Useful for debugging.
tabulate : bool
Optionally return a tabulation of all the cost-flows.
Returns
-------
cost : float
Total distance between distributions in probability units.
costtab : np.ndarray [narcs x nmarkers + 1]
Tabulation of all the flows required to turn freq1 into freq2.
Each row is an arc. The first nmarkers columns give the difference between
the two node vectors and the last column is the cost-flow/distance along that arc."""
nodeLabels = freq1.index.tolist()
nodeVecs = [subset2vec(m) for m in nodeLabels]
markers = nodeLabels[0].replace('-', '+').split('+')[:-1]
nmarkers = len(markers)
# nodes = list(range(len(nodeLabels)))
if nmarkers == 1:
flow = freq1[markers[0] + '+'] - freq2[markers[0] + '+']
if tabulate:
return np.abs(flow), np.zeros((0,nmarkers+1))
else:
return np.abs(flow)
def _cost(n1, n2):
"""Hamming distance between two node labels"""
return int(np.sum(np.abs(np.array(nodeVecs[n1]) - np.array(nodeVecs[n2]))))
diffv = freq1/freq1.sum() - freq2/freq2.sum()
diffv = (diffv * factor).astype(int)
extra = diffv.sum()
if extra > 0:
for i in range(extra):
diffv[i] -= 1
elif extra < 0:
for i in range(-extra):
diffv[i] += 1
assert diffv.sum() == 0
posNodes = np.nonzero(diffv > 0)[0]
negNodes = np.nonzero(diffv < 0)[0]
if len(posNodes) == 0:
"""Returns None when freq1 - freq2 is 0 for every subset/row"""
if tabulate:
return 0, np.zeros((0,nmarkers+1))
else:
return 0
"""Creates a dense network connecting all sources and sinks with cost/distance specified by how many functions differ
TODO: Could this instead be a sparse network connecting function combinations that only differ by 1? Cells have to move
multiple times along the network then. This may minimize to the same solution??"""
tmp = np.array([o for o in itertools.product(posNodes, negNodes)])
startNodes = tmp[:,0].tolist()
endNodes = tmp[:,1].tolist()
"""Set capacity to max possible"""
capacities = diffv[startNodes].tolist()
costs = [_cost(n1,n2) for n1,n2 in zip(startNodes, endNodes)]
supplies = diffv.tolist()
"""Instantiate a SimpleMinCostFlow solver."""
min_cost_flow = pywrapgraph.SimpleMinCostFlow()
"""Add each arc."""
for i in range(len(startNodes)):
min_cost_flow.AddArcWithCapacityAndUnitCost(startNodes[i], endNodes[i],
capacities[i], costs[i])
"""Add node supplies."""
for i in range(len(supplies)):
min_cost_flow.SetNodeSupply(i, supplies[i])
"""Find the minimum cost flow"""
res = min_cost_flow.SolveMaxFlowWithMinCost()
if res != min_cost_flow.OPTIMAL:
if verbose:
print('No optimal solution found.')
if tabulate:
return np.nan, None
else:
return np.nan
if verbose:
print('Minimum cost:', min_cost_flow.OptimalCost())
print('')
print(' Arc Flow / Capacity Cost')
for i in range(min_cost_flow.NumArcs()):
cost = min_cost_flow.Flow(i) * min_cost_flow.UnitCost(i)
print('%1s -> %1s %3s / %3s %3s' % (
min_cost_flow.Tail(i),
min_cost_flow.Head(i),
min_cost_flow.Flow(i),
min_cost_flow.Capacity(i),
cost))
cost = min_cost_flow.OptimalCost()/factor
if tabulate:
costtab = np.zeros((tmp.shape[0], nmarkers+1))
for arci in range(min_cost_flow.NumArcs()):
hVec = nodeVecs[min_cost_flow.Head(arci)]
tVec = nodeVecs[min_cost_flow.Tail(arci)]
costtab[arci, :nmarkers] = hVec - tVec
costtab[arci, nmarkers] = min_cost_flow.Flow(arci) / factor
return cost, costtab
else:
return cost
def decomposeDist(freq1, freq2, ICSDist=DenseICSDist, maxways=3, factor=100000, compressCache=None):
"""Compute decomposed distances between freq1 and freq2. The
decomposition includes distances based on marginal/one-way marker
combinations, two-way combinations, etc. up to maxways-way interactions.
Effectively this means compressing freq1/freq2 into lower-order representations
and computing the distances. The lower-order approximations will have distances
that are less than or equal to the total distance.
Parameters
----------
freq1, freq2 : pd.Series
Frequency distribution that should sum to one, with identical indices
containing all marker combinations
ICSDist : function
Function for computing the ICSDistance. Could conceivably
work for different distance functions because it works by marginalizing
the input distributions and does not rely on tabulation.
maxways : int
Indicates the maximum order of interactions (e.g. 3 means allowing
for three-way marker combinations)
factor : int
Since cost-flow estimates are based on integers, its effectively the number of
decimal places to be accurate to. Default 1e5 means magCol is multiplied by 1e5 before rounding to int.
Returns
-------
ctDf : pd.DataFrame
Decomposition of the distance with columns: markers, distance, nmarkers"""
nodeLabels = freq1.index.tolist()
nodeVecs = [subset2vec(m) for m in nodeLabels]
markers = nodeLabels[0].replace('-', '+').split('+')[:-1]
nmarkers = len(markers)
def _prepFreq(freq):
tmp = freq.reset_index()
tmp.columns = ['cytokine', 'freq']
tmp.loc[:, 'ptid'] = 0
return tmp
tmp1 = _prepFreq(freq1)
tmp2 = _prepFreq(freq2)
costs = []
markerCombs = []
for nwaysi in range(min(nmarkers, maxways)):
icombs = [d for d in itertools.combinations(np.arange(nmarkers), nwaysi+1)]
"""Number of times each marker appears in all decompositions"""
norm_factor = np.sum([0 in cyi for cyi in icombs])
for cyi in icombs:
cy = tuple((markers[i] for i in cyi))
if compressCache is None:
cfreq1 = compressSubsets(tmp1, subset=cy, indexCols=['ptid'], magCols=['freq'], nsubCols=None).set_index('cytokine')['freq']
cfreq2 = compressSubsets(tmp2, subset=cy, indexCols=['ptid'], magCols=['freq'], nsubCols=None).set_index('cytokine')['freq']
else:
cfreq1, cfreq2 = compressCache[cy]
cost = ICSDist(cfreq1, cfreq2, factor=factor)
costs.append(cost / norm_factor)
markerCombs.append(cy)
ctDf = pd.DataFrame({'markers':['|'.join(mc) for mc in markerCombs],
'distance':costs,
'nmarkers':[len(mc) for mc in markerCombs]})
return ctDf
def pwDecomposeDist(cdf, magCol='pctpos', cyCol='cytokine', indexCols=['ptid', 'visitday', 'tcellsub', 'antigen'], factor=100000, maxways=3):
"""Compute all pairwise ICS distances among samples indicated by index columns.
Distance is decomposed into marginal and higher-order interactions.
Parameters
----------
cdf : pd.DataFrame
Contains one row per cell population, many rows per sample.
magCol : str
Column containing the magnitude which should add up to 1 for all rows in a sample
cyCol : str
Column containing the marker combination for the row. E.g. IFNg+IL2-TNFa+
indexCols : list
List of columns that make each sample uniquely identifiable
factor : int
Since cost-flow estimates are based on integers, its effectively the number of
decimal places to be accurate to. Default 1e5 means magCol is multiplied by 1e5 before rounding to int.
maxways : int
Specify the degree of higher-order interactions evaluated in the decomposition
Returns
-------
decompDf : pd.DataFrame
Accounting of cost-flows/distances decomposed into one-way, two-way and three-way interactions"""
"""Do all the cytokine compressions once, upfront for efficiency"""
markers = cdf[cyCol].iloc[0].replace('-', '+').split('+')[:-1]
nmarkers = len(markers)
compressed = {}
norm_factor = {}
for nwaysi in range(min(nmarkers, maxways)):
icombs = [d for d in itertools.combinations(np.arange(nmarkers), nwaysi+1)]
"""Number of times each marker appears in all decompositions"""
norm_factor[nwaysi+1] = np.sum([0 in cyi for cyi in icombs])
for cyi in icombs:
cy = tuple((markers[i] for i in cyi))
tmp = compressSubsets(cdf, markerCol=cyCol, subset=cy, indexCols=indexCols, magCols=[magCol], nsubCols=None)
compressed[cy] = tmp.set_index(indexCols + [cyCol])[magCol].unstack(indexCols).fillna(0)
cdf = cdf.set_index(indexCols + [cyCol])[magCol].unstack(indexCols).fillna(0)
metadata = cdf.columns.tolist()
n = cdf.shape[1]
tab = []
for i in range(n):
for j in range(n):
if i <= j:
dec = decomposeDist(cdf.iloc[:,i], cdf.iloc[:,j],
DenseICSDist,
maxways=maxways,
factor=factor,
compressCache={cy: [compressed[cy].iloc[:, ii] for ii in [i,j]] for cy in compressed.keys()})
dec.loc[:, 'samp_i'] = i
dec.loc[:, 'samp_j'] = j
tab.append(dec)
decompDf = pd.concat(tab, axis=0)
return decompDf
def getDecomposed(decompDf, index, nway):
Pull out a square, symmetric, positive distance matrix from the decomposed distance DataFrame
Parameters
----------
decompDf : pd.DataFrame
Output from decomposeDist, containing several longform distance matrices
index : pd.MultiIndex or other array
From the pairwise distance matrix for which this is a decomposition
nway : int
Order of interactions for which a distance matrix will be extracted from the decompDf
Returns
-------
dmatDf : pd.DataFrame
Symmetric pairwise distance matrix with hierarchical columns/index of indexCols"""
tmp = decompDf.loc[decompDf['nmarkers'] == nway].groupby(['samp_i', 'samp_j'])['distance'].agg(np.sum).unstack('samp_j')
lower_i = np.tril_indices(tmp.values.shape[0], k=-1)
tmp.values[lower_i] = tmp.values.T[lower_i]
tmp.columns = index
tmp.index = index
return tmp
_eg_3cytokine = ['IFNg-IL2-TNFa-',
'IFNg+IL2-TNFa-',
'IFNg-IL2+TNFa-',
'IFNg-IL2-TNFa+',
'IFNg+IL2+TNFa-',
'IFNg+IL2-TNFa+',
'IFNg-IL2+TNFa+',
'IFNg+IL2+TNFa+']
_eg_2cytokine = ['IFNg-IL2-',
'IFNg+IL2-',
'IFNg-IL2+',
'IFNg+IL2+']
def _example_data():
freq1 = pd.Series(np.zeros(len(_eg_3cytokine)), index=_eg_3cytokine)
freq2 = pd.Series(np.zeros(len(_eg_3cytokine)), index=_eg_3cytokine)
freq1['IFNg+IL2-TNFa+'] = 0.5
freq1['IFNg+IL2+TNFa-'] = 0.5
freq2['IFNg+IL2+TNFa+'] = 1
return freq1, freq2
def _test_decompose_pair():
freq1 = pd.Series(np.zeros(len(_eg_2cytokine)), index=_eg_2cytokine)
freq1['IFNg-IL2-'] = 1
freq2 = freq1.copy()
freq2['IFNg+IL2+'] += 0.1
freq2['IFNg-IL2-'] = 0.9
cost, costtab = DenseICSDist(freq1, freq2, factor=100000)
ctDf = decomposeDist(freq1, freq2, DenseICSDist)
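# A hedged end-to-end sketch of the decomposition pipeline (it reuses the 2-cytokine labels
# above; indexCols are trimmed to 'ptid' for brevity and _example_decomposed_matrix is a
# hypothetical helper, not part of the original module):
def _example_decomposed_matrix():
    toy = pd.DataFrame({'ptid': ['P1'] * 4 + ['P2'] * 4,
                        'cytokine': _eg_2cytokine * 2,
                        'pctpos': [0.90, 0.05, 0.03, 0.02,
                                   0.80, 0.10, 0.05, 0.05]})
    dmat = pwICSDist(toy, indexCols=['ptid'])
    decomp = pwDecomposeDist(toy, indexCols=['ptid'])
    # one-way (marginal) part of the pairwise distances, aligned with dmat's index
    one_way = getDecomposed(decomp, dmat.index, nway=1)
    return dmat, one_way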
def _test_decompose_pair_interaction():
freq1 = pd.Series([0.1, 0.4, 0.4, 0.1], index=_eg_2cytokine)
|
from functools import reduce
import numpy as np
import pandas as pd
import pyprind
from .enums import *
class Backtest:
"""Backtest runner class."""
def __init__(self, allocation, initial_capital=1_000_000, shares_per_contract=100):
assets = ('stocks', 'options', 'cash')
total_allocation = sum(allocation.get(a, 0.0) for a in assets)
self.allocation = {}
for asset in assets:
self.allocation[asset] = allocation.get(asset, 0.0) / total_allocation
self.initial_capital = initial_capital
self.stop_if_broke = True
self.shares_per_contract = shares_per_contract
self._stocks = []
self._options_strategy = None
self._stocks_data = None
self._options_data = None
@property
def stocks(self):
return self._stocks
@stocks.setter
def stocks(self, stocks):
assert np.isclose(sum(stock.percentage for stock in stocks), 1.0,
atol=0.000001), 'Stock percentages must sum to 1.0'
self._stocks = list(stocks)
return self
@property
def options_strategy(self):
return self._options_strategy
@options_strategy.setter
def options_strategy(self, strat):
self._options_strategy = strat
@property
def stocks_data(self):
return self._stocks_data
@stocks_data.setter
def stocks_data(self, data):
self._stocks_schema = data.schema
self._stocks_data = data
@property
def options_data(self):
return self._options_data
@options_data.setter
def options_data(self, data):
self._options_schema = data.schema
self._options_data = data
def run(self, rebalance_freq=0, monthly=False, sma_days=None):
"""Runs the backtest and returns a `pd.DataFrame` of the orders executed (`self.trade_log`)
Args:
rebalance_freq (int, optional): Determines the frequency of portfolio rebalances. Defaults to 0.
monthly (bool, optional): Iterates through data monthly rather than daily. Defaults to False.
Returns:
pd.DataFrame: Log of the trades executed.
"""
assert self._stocks_data, 'Stock data not set'
assert all(stock.symbol in self._stocks_data['symbol'].values
for stock in self._stocks), 'Ensure all stocks in portfolio are present in the data'
assert self._options_data, 'Options data not set'
assert self._options_strategy, 'Options Strategy not set'
assert self._options_data.schema == self._options_strategy.schema
option_dates = self._options_data['date'].unique()
stock_dates = self.stocks_data['date'].unique()
assert np.array_equal(stock_dates,
option_dates), 'Stock and options dates do not match (check that TZ are equal)'
self._initialize_inventories()
self.current_cash = self.initial_capital
self.trade_log = pd.DataFrame()
self.balance = pd.DataFrame({
'total capital': self.current_cash,
'cash': self.current_cash
},
index=[self.stocks_data.start_date - pd.Timedelta(1, unit='day')])
if sma_days:
self.stocks_data.sma(sma_days)
dates = pd.DataFrame(self.options_data._data[['quotedate',
'volume']]).drop_duplicates('quotedate').set_index('quotedate')
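# Rebalancing days: bucket the quote dates into business-month-start periods of length
# `rebalance_freq` ('BMS') and take the earliest quote date in each bucket; with
# rebalance_freq == 0 no rebalancing days are scheduled.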
rebalancing_days = pd.to_datetime(
dates.groupby(pd.Grouper(freq=str(rebalance_freq) +
'BMS')).apply(lambda x: x.index.min()).values) if rebalance_freq else []
data_iterator = self._data_iterator(monthly)
bar = pyprind.ProgBar(len(stock_dates), bar_char='█')
for date, stocks, options in data_iterator:
if (date in rebalancing_days):
previous_rb_date = rebalancing_days[rebalancing_days.get_loc(date) -
1] if rebalancing_days.get_loc(date) != 0 else date
self._update_balance(previous_rb_date, date)
self._rebalance_portfolio(date, stocks, options, sma_days)
bar.update()
# Update balance for the period between the last rebalancing day and the last day
self._update_balance(rebalancing_days[-1], self.stocks_data.end_date)
self.balance['options capital'] = self.balance['calls capital'] + self.balance['puts capital']
self.balance['stocks capital'] = sum(self.balance[stock.symbol] for stock in self._stocks)
self.balance['stocks capital'].iloc[0] = 0
self.balance['options capital'].iloc[0] = 0
self.balance[
'total capital'] = self.balance['cash'] + self.balance['stocks capital'] + self.balance['options capital']
self.balance['% change'] = self.balance['total capital'].pct_change()
self.balance['accumulated return'] = (1.0 + self.balance['% change']).cumprod()
return self.trade_log
def _initialize_inventories(self):
"""Initialize empty stocks and options inventories."""
columns = pd.MultiIndex.from_product(
[[l.name for l in self._options_strategy.legs],
['contract', 'underlying', 'expiration', 'type', 'strike', 'cost', 'order']])
totals = pd.MultiIndex.from_product([['totals'], ['cost', 'qty', 'date']])
|
import unittest
import os
import tempfile
from collections import namedtuple
from blotter import blotter
from pandas.util.testing import assert_frame_equal, assert_series_equal, \
assert_dict_equal
import pandas as pd
import numpy as np
class TestBlotter(unittest.TestCase):
def setUp(self):
cdir = os.path.dirname(__file__)
self.prices = os.path.join(cdir, 'data/prices')
self.rates = os.path.join(cdir, 'data/rates/daily_interest_rates.csv')
self.log = os.path.join(cdir, 'data/events.log')
self.meta_log = os.path.join(cdir, 'data/meta_data.log')
def tearDown(self):
pass
def assertEventsEqual(self, evs1, evs2):
if len(evs1) != len(evs2):
raise(ValueError("Event lists length mismatch"))
for ev1, ev2 in zip(evs1, evs2):
self.assertEqual(ev1.type, ev2.type)
assert_dict_equal(ev1.data, ev2.data)
def assertEventTypes(self, evs1, evs2):
msg = "Event lists length mismatch\n\nLeft:\n%s \nRight:\n%s"
left_msg = ""
for ev in evs1:
left_msg += str(ev) + "\n"
right_msg = ""
for ev in evs2:
right_msg += ev.type + "\n"
msg = msg % (left_msg, right_msg)
if len(evs1) != len(evs2):
raise(ValueError(msg))
for ev1, ev2 in zip(evs1, evs2):
if ev1.type is not ev2.type:
raise(ValueError(msg))
def assertDictDataFrameEqual(self, dict1, dict2):
self.assertEqual(dict1.keys(), dict2.keys())
for key in dict1.keys():
try:
assert_frame_equal(dict1[key], dict2[key])
except AssertionError as e:
e.args = (("\nfor key %s\n" % key) + e.args[0],)
raise e
def make_blotter(self):
blt = blotter.Blotter(self.prices, self.rates)
return blt
def test_get_actions(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-04T10:30")
new_ts = pd.Timestamp("2017-01-06T10:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-05T16:00"),
pd.Timestamp("2017-01-05T16:00")])
ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
assert_series_equal(ac_ts, ac_ts_ex)
def test_get_actions_weekend_filter(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-06T10:30")
new_ts = pd.Timestamp("2017-01-09T16:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-09T16:00"),
pd.Timestamp("2017-01-09T16:00")])
ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
assert_series_equal(ac_ts, ac_ts_ex)
def test_trade_undefined_instrument(self):
blt = self.make_blotter()
ts = pd.Timestamp('2016-12-10T08:30:00')
instr = 'CLZ6'
qty = 1
price = 48.56
def make_trade():
blt._trade(ts, instr, qty, price)
self.assertRaises(KeyError, make_trade)
def test_get_meta_data(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD")
# currency of instrument defaults to base ccy of blotter when not given
blt.define_generic("CL", margin=0.1, multiplier=100, commission=2.5,
isFX=False)
meta = namedtuple('metadata', ['ccy', 'margin', 'multiplier',
'commission', 'isFX'])
metadata_exp = meta("USD", 0.1, 100, 2.5, False)
metadata = blt._gnrc_meta["CL"]
self.assertEqual(metadata, metadata_exp)
def test_get_holdings_empty(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
hlds = blt.get_holdings_value(ts)
assert_series_equal(hlds, pd.Series())
def test_get_holdings_value_no_fx_conversion(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
blt.define_generic("SXM", "ZAR", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt._trade(ts, 'SXMZ15', qty, price)
def no_fx():
return blt.get_holdings_value(ts)
self.assertRaises(KeyError, no_fx)
def test_get_holdings_timestamp_before(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-05T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-04T00:00:00')
def get_holdings():
blt.get_holdings_value(ts)
self.assertRaises(ValueError, get_holdings)
def test_get_holdings_base_ccy(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([2082.73 * 100], index=['ESZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_holds_AUD_instr_AUDUSD_fxrate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'APZ15'
qty = 1
price = 5200
blt.define_generic("AP", "AUD", 0.1, 1, 2.5)
blt.map_instrument("AP", "APZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([5283 * 0.73457], index=['APZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_holds_CAD_instr_USDCAD_fxrate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'SXMZ15'
qty = 1
price = 802.52
blt.define_generic("SXM", "CAD", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt._trade(ts, instr, qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
hlds = blt.get_holdings_value(ts)
hlds_exp = pd.Series([795.95 / 1.3183], index=['SXMZ15'])
assert_series_equal(hlds, hlds_exp)
def test_get_instruments_empty(self):
blt = self.make_blotter()
blt.connect_market_data()
instrs = blt.get_instruments()
assert_series_equal(instrs, pd.Series())
def test_get_instruments_multiplier(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price)
instrs = blt.get_instruments()
instrs_exp = pd.Series([qty], index=['ESZ15'])
assert_series_equal(instrs, instrs_exp)
def test_get_instruments_two_ccy(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr1 = 'ESZ15'
instr2 = 'CLZ15'
qty = 1
price = 2081
blt.define_generic("ES", "USD", 0.1, 100, 2.5)
blt.map_instrument("ES", "ESZ15")
blt.define_generic("CL", "CAD", 0.1, 1, 2.5)
blt.map_instrument("CL", "CLZ15")
blt._trade(ts, instr1, qty, price)
blt._trade(ts, instr2, qty, price)
instrs = blt.get_instruments()
instrs_exp = pd.Series([qty, qty], index=['CLZ15', 'ESZ15'])
assert_series_equal(instrs, instrs_exp)
def test_get_trades_one_future_base_to_base(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
mid_price = 2080.75
blt.define_generic("ES", "USD", 0.1, 50, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price, mid_price)
trades = blt.get_trades()
cols = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
'ccy', 'fx_to_base']
exp_trades = pd.DataFrame([[instr, 1, 50, price, mid_price,
"USD", 1.0]], index=[ts], columns=cols)
exp_trades.index.name = 'timestamp'
assert_frame_equal(trades, exp_trades)
def test_get_trades_one_future_with_mid_price_fx(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price = 2081
mid_price = 2080.75
blt.define_generic("ES", "CAD", 0.1, 50, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, instr, qty, price, mid_price)
trades = blt.get_trades()
cols = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
'ccy', 'fx_to_base']
exp_trades = pd.DataFrame([[instr, 1, 50, price, mid_price, "CAD",
1 / 1.3125]], index=[ts], columns=cols)
exp_trades.index.name = 'timestamp'
assert_frame_equal(trades, exp_trades)
def test_get_trades_two_futures(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
instr = 'ESZ15'
qty = 1
price1 = 2081
mid_price1 = 2080.75
price2 = 2083
mid_price2 = 2082.75
blt.define_generic("ES", "USD", 0.1, 50, 2.5)
blt.map_instrument("ES", "ESZ15")
blt.map_instrument("ES", "ESF16")
blt._trade(ts, instr, qty, price1, mid_price1)
blt._trade(ts, instr, qty, price2, mid_price2)
trades = blt.get_trades()
cols = ['instrument', 'quantity', 'multiplier', 'price', 'ntc_price',
'ccy', 'fx_to_base']
data = [[instr, 1, 50, price1, mid_price1, "USD", 1.0],
[instr, 1, 50, price2, mid_price2, "USD", 1.0]]
exp_trades = pd.DataFrame(data, index=[ts, ts], columns=cols)
exp_trades.index.name = 'timestamp'
assert_frame_equal(trades, exp_trades)
def test_create_unknown_event(self):
blt = self.make_blotter()
ts = pd.Timestamp('2015-08-03T00:00:00')
def create_unknown():
return blt.create_events(ts, "NotAllowed")
self.assertRaises(NotImplementedError, create_unknown)
def test_dispatch_unknown_event(self):
blt = self.make_blotter()
ev = blotter._Event("NotAnEvent",
{"timestamp": pd.Timestamp('2015-01-01')})
def dispatch_unknown():
blt.dispatch_events([ev])
self.assertRaises(NotImplementedError, dispatch_unknown)
def test_create_interest_event(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-03T00:00:00')
blt._holdings.update_cash(ts, "AUD", 1000000)
blt._holdings.update_cash(ts, "JPY", 1000000)
ts = pd.Timestamp('2015-08-04T00:00:00')
evs = blt.create_events(ts, "INTEREST")
irates = pd.read_csv(self.rates, index_col=0, parse_dates=True)
aud_int = irates.loc[ts, "AUD"] / 365 * 1000000
jpy_int = irates.loc[ts, "JPY"] / 365 * 1000000
evs_exp = [blotter._Event("INTEREST", {"timestamp": ts, "ccy": "AUD",
"quantity": aud_int}),
blotter._Event("INTEREST", {"timestamp": ts, "ccy": "JPY",
"quantity": jpy_int})]
self.assertEventsEqual(evs, evs_exp)
def test_create_interest_event_no_rate(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-03T00:00:00')
# No ZAR data
blt._holdings.update_cash(ts, "ZAR", 1000000)
ts = pd.Timestamp('2015-08-04T00:00:00')
def get_interest():
return blt.create_events(ts, "INTEREST")
self.assertRaises(KeyError, get_interest)
def test_create_interest_weekend_event(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-06T00:00:00')
blt._holdings.update_cash(ts, "AUD", 1000000)
blt._holdings.update_cash(ts, "JPY", 1000000)
ts = pd.Timestamp('2015-08-07T00:00:00')
evs = blt.create_events(ts, "INTEREST")
irates = pd.read_csv(self.rates, index_col=0, parse_dates=True)
aud_int = irates.loc[ts, "AUD"] / 365 * 3 * 1000000
jpy_int = irates.loc[ts, "JPY"] / 365 * 3 * 1000000
evs_exp = [blotter._Event("INTEREST", {"timestamp": ts, "ccy": "AUD",
"quantity": aud_int}),
blotter._Event("INTEREST", {"timestamp": ts, "ccy": "JPY",
"quantity": jpy_int})]
self.assertEventsEqual(evs, evs_exp)
def test_create_margin_event(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD",
margin_charge=0.015)
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
blt.define_generic("SXM", "CAD", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt.define_generic("ES", "USD", 0.05, 1, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, 'SXMZ15', qty, price)
blt._trade(ts, "ESZ15", qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
ev = blt.create_events(ts, "MARGIN")
rates = pd.read_csv(self.rates, index_col=0, parse_dates=True)
es_fp = os.path.join(self.prices, 'ESZ15.csv')
es = pd.read_csv(es_fp, index_col=0, parse_dates=True)
sxm_fp = os.path.join(self.prices, 'SXMZ15.csv')
sxm = pd.read_csv(sxm_fp, index_col=0, parse_dates=True)
usdcad_fp = os.path.join(self.prices, 'USDCAD.csv')
usdcad = pd.read_csv(usdcad_fp, index_col=0, parse_dates=True)
es_notional = es.loc[ts].values * qty * 0.05
sxm_notional = sxm.loc[ts].values * qty * 0.1 / usdcad.loc[ts].values
notnl = float(es_notional + sxm_notional)
quantity = notnl * (rates.loc[ts, "USD"] + 0.015) / 365
ev_exp = [blotter._Event("INTEREST", {"timestamp": ts, "ccy": "USD",
"quantity": quantity})]
self.assertEventsEqual(ev, ev_exp)
def test_create_short_margin_event(self):
blt = blotter.Blotter(self.prices, self.rates, base_ccy="USD",
margin_charge=0.015)
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = -1
price = 0
blt.define_generic("ES", "USD", 0.05, 1, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, "ESZ15", qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
ev = blt.create_events(ts, "MARGIN")
rates = pd.read_csv(self.rates, index_col=0, parse_dates=True)
es_fp = os.path.join(self.prices, 'ESZ15.csv')
es = pd.read_csv(es_fp, index_col=0, parse_dates=True)
es_notional = float(es.loc[ts].values * np.abs(qty) * 0.05)
quantity = es_notional * (rates.loc[ts, "USD"] + 0.015) / 365
ev_exp = [blotter._Event("INTEREST", {"timestamp": ts, "ccy": "USD",
"quantity": quantity})]
self.assertEventsEqual(ev, ev_exp)
def test_create_pnl_event(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
blt.define_generic("SXM", "CAD", 0.1, 1, 2.5)
blt.map_instrument("SXM", "SXMZ15")
blt.define_generic("ES", "USD", 0.05, 1, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, 'SXMZ15', qty, price)
blt._trade(ts, "ESZ15", qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
ev = blt.create_events(ts, "PNL")
es_fp = os.path.join(self.prices, 'ESZ15.csv')
es = pd.read_csv(es_fp, index_col=0, parse_dates=True)
sxm_fp = os.path.join(self.prices, 'SXMZ15.csv')
sxm = pd.read_csv(sxm_fp, index_col=0, parse_dates=True)
prices = pd.concat([es.loc[ts], sxm.loc[ts]], axis=0)
ev_exp = [blotter._Event("PNL", {"timestamp": ts, "prices": prices})]
self.assertEventsEqual(ev, ev_exp)
def test_create_pnl_event_no_price(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
# No price info for BBBZ15
blt.define_generic("BBB", "CAD", 0.1, 1, 2.5)
blt.map_instrument("BBB", "BBBZ15")
blt._trade(ts, 'BBBZ15', qty, price)
ts = pd.Timestamp('2015-08-05T00:00:00')
def no_price():
return blt.create_events(ts, "PNL")
self.assertRaises(KeyError, no_price)
def test_closed_position_pnl_event(self):
blt = self.make_blotter()
blt.connect_market_data()
ts = pd.Timestamp('2015-08-04T00:00:00')
qty = 1
price = 0
blt.define_generic("ES", "USD", 0.05, 1, 2.5)
blt.map_instrument("ES", "ESZ15")
blt._trade(ts, "ESZ15", qty, price)
ts =
|
pd.Timestamp('2015-08-05T00:00:00')
|
pandas.Timestamp
|
import ast
import csv
import sys, os
from pandas import DataFrame, to_datetime
from PyQt5 import uic
from PyQt5.QtChart import QChartView, QValueAxis, QBarCategoryAxis, QBarSet, QBarSeries, QChart
from PyQt5.QtCore import QFile, QTextStream, Qt
from PyQt5.QtGui import QPainter
from PyQt5.QtWidgets import QApplication, QComboBox, QHeaderView, QLineEdit, QMainWindow, QPushButton, QTableWidget, QTableView,QTableWidgetItem, QMessageBox, QFileDialog
from client.charts import Piechart, Barchart
from client.datahandler import DataHandler
from client.logs import PandasModel
from modules.Processor import ProcessData
from modules.Parser import export_to_file
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
#Load the UI Page
uic.loadUi('client/main.ui', self)
# upload
self.actionUpload.triggered.connect(self.upload)
# Exit
self.actionExit.triggered.connect(self.exit)
self.df = None
self.searchdata = None
# Export Protocols and IP
self.actionSummary.triggered.connect(self.Summary)
# Exporting table details
self.actionTableDetails.triggered.connect(self.TableDetails)
def popup(self):
'''
Popup Dialog to request file to be uploaded
'''
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Information)
msgBox.setWindowTitle("New File")
msgBox.setText("Upload New File to Analyze.")
msgBox.setStandardButtons(QMessageBox.Open)
msgBox.buttonClicked.connect(self.upload)
msgBox.exec()
def upload(self):
'''
Uploads file to application
'''
fileName, _ = QFileDialog.getOpenFileName(None, "Select File", "", "Log Files (*.csv *.tsv *.json *.xls *.xlsx)")
if fileName != '':
proc = ProcessData(fileName)
proc.parse()
data = proc.analyse()
self.df = DataFrame.from_dict(data)
self.display()
else:
self.showMessageBox("File Not Uploaded", "File Not Uploaded Successfully")
def display(self):
'''
Calls the data processor DataHandler and displays the result
'''
if self.df is not None:
self.data = DataHandler(self.df)
QApplication.processEvents()
# self.summary = self.data.getSummary()
self.chartseries = self.data.getSeries()
# Displays Charts and Tables
self.displaychart("attackchart", self.chartseries, "Attack Types")
self.displaytable("datatable", self.df)
self.displaytop("topip", self.data.getTopIPs(), ['IP Addresses', 'Count'])
self.displaytop("topports", self.data.getTopProtocols(), ['Protocol : Port', 'Count'])
QApplication.processEvents()
# Search Fields and Buttons
self.isatksearch = self.findChild(QComboBox, "isAtk")
self.ipsearch = self.findChild(QLineEdit, "ipaddr")
self.protocolsearch = self.findChild(QLineEdit, "protocol")
self.portsearch = self.findChild(QLineEdit, "port")
self.atksearch = self.findChild(QLineEdit, "atk")
self.timesearch = self.findChild(QLineEdit, "time")
self.searchbtn = self.findChild(QPushButton, "searchbtn")
self.searchbtn.clicked.connect(self.search)
self.clearbtn = self.findChild(QPushButton, "clearbtn")
self.clearbtn.clicked.connect(self.clear)
QApplication.processEvents()
self.bargraph()
QApplication.processEvents()
def bargraph(self):
'''
Processes and Creates Bar Graph.
'''
self.barchart = self.findChild(QChartView, "attackgraph")
bardata = self.data.getBar()
chartobj = Barchart(bardata)
chartseries = chartobj.getSeries()
# create QChart object and add data
chart = QChart()
chart.addSeries(chartseries)
chart.setTitle("Attacks Over the Past 12 Months")
chart.setAnimationOptions(QChart.SeriesAnimations)
axisX = QBarCategoryAxis()
axisX.append(chartobj.getKeys())
chart.addAxis(axisX, Qt.AlignBottom)
axisY = QValueAxis()
axisY.setRange(0, chartobj.getMax())
chart.addAxis(axisY, Qt.AlignLeft)
chart.legend().setVisible(False)
self.barchart.setChart(chart)
def clear(self):
'''
Clears Search Form
'''
self.isatksearch.setCurrentIndex(0)
self.ipsearch.clear()
self.protocolsearch.clear()
self.portsearch.clear()
self.atksearch.clear()
self.timesearch.clear()
self.pdmdl.clear()
self.searchdata = None
self.logtable.setModel(self.pdmdl)
def displaychart(self, widgetname, chartseries, title):
'''
Displays PieChart
------------------
widgetname : str of widget to call in .ui file
chartseries: PyQT Series to be displayed on chart
title: str of title to be header of chart
'''
self.piechart = self.findChild(QChartView, widgetname)
chartdata = Piechart(chartseries, title).create()
self.piechart.setChart(chartdata)
self.piechart.setRenderHint(QPainter.Antialiasing)
def displaytop(self, widgetname, data, header):
'''
Displays Top IP/Protocols Table
Parameters
------------------
widgetname : str of widget to call in .ui file
data: dict of top ip/protocol data to display
header: list of str column headers for the table
'''
table = self.findChild(QTableWidget, widgetname)
table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
table.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
table.setColumnCount(2)
table.setRowCount(5)
table.setHorizontalHeaderLabels(header)
index = 0
for k,v in data.items():
table.setItem(int(index),0, QTableWidgetItem(k))
table.setItem(int(index),1, QTableWidgetItem(str(v)))
index += 1
def displaytable(self, widgetname, data):
'''
Displays Log Table
Parameters
------------------
widgetname: str of widget to call in .ui file
data: Pandas Dataframe
'''
self.logtable = self.findChild(QTableView, widgetname)
self.logtable.setSortingEnabled(True)
self.pdmdl = PandasModel(data)
self.logtable.setModel(self.pdmdl)
self.logtable.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.logtable.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)
def search(self):
'''
Checks Search Form to be sent to table
'''
# get searchquery as dictionary
searchquery = {'IsAtk': self.isatksearch.currentText(), 'IP': self.ipsearch.text(), 'Protocol': self.protocolsearch.text(), 'Port': self.portsearch.text(), 'Atk': self.atksearch.text(), 'Time': self.timesearch.text()}
# check if search query is not empty
searchquery = {k: v for k, v in searchquery.items() if v != ''}
atk = {'Yes': 1, 'No':0}
if searchquery.get('IsAtk', None) == '-':
del searchquery['IsAtk']
elif searchquery.get('IsAtk', None) != None:
searchquery['IsAtk'] = atk[searchquery['IsAtk']]
# check if the searchquery is empty
if bool(searchquery) is True:
self.searchdata = self.pdmdl.search(searchquery)
if self.searchdata is not None:
self.logtable.setModel(PandasModel(self.searchdata, search=True))
else:
self.clear()
else:
self.clear()
def Summary(self):
'''
Exports summary
'''
protocol = self.data.getTopProtocols()
ip = self.data.getTopIPs()
fileName = QFileDialog.getSaveFileName(self, "Save File", "", "Log Files (*.csv *.tsv *.json *.xls *.xlsx)")
if fileName[0]:
export_data = [x + y for x, y in zip(protocol.items(), ip.items())]
export_columns = ['Protocol & Ports','Counts','IP Address','Counts']
export_dataframe = DataFrame(export_data, columns=export_columns)
export_to_file(fileName[0], export_dataframe)
self.showMessageBox('File Exported',"File Exported successfully")
else:
self.showMessageBox('File not Exported', "The file was not exported")
def TableDetails(self):
'''
Exports table data
'''
fileName = QFileDialog.getSaveFileName(self, "Save File", "", "Log Files (*.csv *.tsv *.json *.xls *.xlsx)")
if self.searchdata is None:
exportdata = self.data.getData()
formatteddata = exportdata.transpose()
formatteddata['IsAtk'] = formatteddata['IsAtk'].map({1:'Yes', 0:'No'}) # Changes 1 and 0 to Yes and No for table
formatteddata['Time'] =
|
to_datetime(formatteddata['Time'],unit='s')
|
pandas.to_datetime
|
from sklearn.metrics import classification_report
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
import torch.nn as nn
import torch.utils.data as data
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from Common_Function_ import *
import torch.multiprocessing
from models.MesoNet4_forEnsemble import MesoInception4 as MesoNet
from PIL import Image
torch.multiprocessing.set_sharing_strategy('file_system')
GPU = '1,2'
os.environ["CUDA_VISIBLE_DEVICES"] = GPU
device = torch.device(f"cuda" if torch.cuda.is_available() else "cpu")
calculate = False
EPOCHS = 50
BATCH_SIZE = 64
VALID_RATIO = 0.3
N_IMAGES = 100
START_LR = 1e-5
END_LR = 10
NUM_ITER = 100
PATIENCE_EARLYSTOP=10
pretrained_size = 224
pretrained_means = [0.4489, 0.3352, 0.3106]#[0.485, 0.456, 0.406]
pretrained_stds= [0.2380, 0.1965, 0.1962]#[0.229, 0.224, 0.225]
class CustumDataset(Dataset):
def __init__(self, data, target, data_2=None, target_2=None, transform=None):
self.data = data
self.target = target
self.data_video = data_2
self.target_video = target_2
self.transform = transform
if self.data_video:
self.len_data2 = len(self.data_video)
print(self.len_data2)
print(len(self.data_video))
print(len(self.data))
assert (self.len_data2 == len(self.target) == len(self.target_video) == len(self.data) == len(self.data_video))
def __len__(self):
return len(self.target)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
path = self.data[idx]
img = Image.open(path)
img = img.convert('RGB')
if self.transform:
img = self.transform(img)
if self.data_video:
path_video = self.data[idx]
img_video = Image.open(path_video)
img_video = img_video.convert('RGB')
if self.transform:
img_video = self.transform(img_video)
return img, self.target[idx], img_video, self.target_video[idx]
train_transforms = transforms.Compose([
transforms.Resize((pretrained_size,pretrained_size)),
transforms.RandomHorizontalFlip(0.5),
# transforms.RandomCrop(pretrained_size, padding = 10),
transforms.ToTensor(),
transforms.Normalize(mean = pretrained_means,
std = pretrained_stds)
])
test_transforms = transforms.Compose([
transforms.Resize((pretrained_size,pretrained_size)),
transforms.ToTensor(),
transforms.Normalize(mean = pretrained_means,
std = pretrained_stds)
])
####
def getnum_of_files(path):
_dict = {}
for (a,b,c) in os.walk(path):
if not b:
_dict[a.split('/')[-1]] = len(c)
return _dict
####
test_dir = ["/media/data1/mhkim/FAKEVV_hasam/test/SPECTOGRAMS/real_A_fake_others",
"/media/data1/mhkim/FAKEVV_hasam/test/FRAMES/real_A_fake_others"]
list_test = [datasets.ImageFolder(root = test_dir[0],transform = None),
datasets.ImageFolder(root = test_dir[1],transform = None)]
print(len(list_test[0].targets))
print(len(list_test[1].targets))
#test
list_glob_testpath = [list_test[1].samples[i][0] for i in range(len(list_test[1].samples))]
list_targets_testpath = [list_test[1].targets[i] for i in range(len(list_test[1].targets))]
list_num_test = getnum_of_files(test_dir[1])
list_glob_testpath_video=[]; list_targets_testpath_video=[]
for i in range(len(list_test[0].samples)):
_str = list_test[0].samples[i][0].split('/')[-2]
num_repeat = int(list_num_test[_str])
list_glob_testpath_video += [list_test[0].samples[i][0]] * num_repeat
list_targets_testpath_video += [list_test[0].targets[i]] * num_repeat
i = i + num_repeat  # note: reassigning the loop variable does not skip iterations of this for-loop
assert(list_targets_testpath_video == list_targets_testpath)
test_data = CustumDataset(list_glob_testpath, list_targets_testpath, list_glob_testpath_video, list_targets_testpath_video, test_transforms)
print(f'Number of testing examples: {len(test_data)}')
pretrained_size = 224
pretrained_means = [0.4489, 0.3352, 0.3106]#[0.485, 0.456, 0.406]
pretrained_stds= [0.2380, 0.1965, 0.1962]#[0.229, 0.224, 0.225]
models = [MesoNet(), MesoNet()]
MODELS_NAME = 'MesoInception4'
# checkpoinsts for model loaders : [VIDEO(A&B), FRAME(A&C)]
list_checkpoint = [torch.load(f'/home/mhkim/DFVV/PRETRAINING/{MODELS_NAME}_realA_fakeB.pt')['state_dict'],
torch.load(f'/home/mhkim/DFVV/PRETRAINING/{MODELS_NAME}_realA_fakeC.pt')['state_dict']]
models[0].load_state_dict(list_checkpoint[0])
models[1].load_state_dict(list_checkpoint[1])
enc = OneHotEncoder(sparse=False)
y_true = np.zeros((0, 2), dtype=np.int8)
y_pred = np.zeros((0, 2), dtype=np.int8)
models[0].eval()
models[1].eval()
test_iterator = data.DataLoader(test_data,
shuffle = True,
batch_size = BATCH_SIZE)
def count(x):
return x.value_counts().sort_values(ascending=False).index[0]
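# Illustrative note (added, not from the original source): count() returns the
# most frequent value of a pandas Series, e.g. count(pd.Series([1, 1, 2])) -> 1.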
import pandas as pd
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
'''The first task is to read the json file as a Pandas DataFrame and delete the rows
which contain invalid values in the attributes of “points” and “price”.'''
df = pd.read_json('datasets//wine.json')
df = df.dropna(subset=['points', 'price'])
'''what are the 10 varieties of wine which receives the highest number of reviews?'''
dfTop10MostReviews = df['variety'].value_counts()[:10]
print("Q1:")
print(dfTop10MostReviews)
print('\n')
'''which varieties of wine having the average price less than 20, with the average points at least 90?'''
averagePoints = df.groupby('variety', as_index=False)['points'].mean()
averagePoints = averagePoints.loc[averagePoints['points']>=90]
averagePrice = df.groupby('variety', as_index=False)['price'].mean()
averagePrice = averagePrice.loc[averagePrice['price']<20]
q2 =
|
pd.merge(averagePrice, averagePoints, on='variety')
|
pandas.merge
|
import collections
import os
import traceback
from datetime import datetime, timedelta
import pandas as pd
from openpyxl.styles import PatternFill
import config
from openpyxl import load_workbook
import numpy as np
import xlrd
def get_date_index(date, dates_values, lookback_index=0):
if isinstance(dates_values[0], str):
dates_values = [datetime.strptime(x, '%Y-%m-%d') for x in dates_values]
elif isinstance(dates_values[0], np.datetime64):
dates_values = [x.astype('M8[ms]').astype('O') for x in dates_values]
if len(dates_values) > 1:
if dates_values[0] > dates_values[1]: # if dates decreasing rightwards or downwards
date_index = next((index for (index, item) in enumerate(dates_values) if item < date), 0)
# adjusted_lookback = date_item - lookback_period
# lookback_index = next((
# index for (index, item) in enumerate(dates_values[date_index:]) if item <= adjusted_lookback), 0)
return date_index + lookback_index
else: # if dates increasing rightwards or downwards
date_index = next((index for (index, item) in enumerate(dates_values) if item > date), -1)
# adjusted_lookback = date_item - lookback_period
# lookback_index = next((
# index for (index, item) in enumerate(dates_values[date_index:]) if item > adjusted_lookback), -1)
return date_index - lookback_index # TODO Fix lookback index is a date here, convert before calling method
else:
return 0
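# Illustrative example (added, not from the original source): with ascending dates,
# get_date_index returns the position of the first date strictly greater than `date`, e.g.
# get_date_index(datetime(2020, 1, 15), [datetime(2020, 1, 1), datetime(2020, 2, 1)]) -> 1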
def slice_series_dates(series, from_date, to_date):
date_idx_from = get_date_index(from_date, series.index)
date_idx_to = get_date_index(to_date, series.index)
return series[date_idx_from:date_idx_to]
def save_into_csv(filename, df, sheet_name='Sheet1', startrow=None,
overwrite_sheet=False, concat=False,
**to_excel_kwargs):
# ignore [engine] parameter if it was passed
if 'engine' in to_excel_kwargs:
to_excel_kwargs.pop('engine')
writer =
|
pd.ExcelWriter(filename, engine='openpyxl')
|
pandas.ExcelWriter
|
import copy
import os
from functools import partial
from pathlib import Path
from typing import List, Tuple
import hydra
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import scipy
import torch
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from sklearn.neighbors import KDTree
from src.dataset.datamodule import GsdcDatamodule, interpolate_vel
from src.dataset.utils import get_groundtruth
from src.modeling.pl_model import LitModel
from src.postprocess.metric import print_metric
from src.postprocess.postporcess import (apply_kf_smoothing, filter_outlier,
mean_with_other_phones)
from src.postprocess.visualize import add_distance_diff
from src.utils.util import set_random_seed
pd.set_option("display.max_rows", 100)
SEED = 42
def check_test_df(path_a, path_b):
df_a = pd.read_csv(path_a)
df_b = pd.read_csv(path_b)
df_a = df_a.rename(columns={"latDeg": "latDeg_gt", "lngDeg": "lngDeg_gt"})
df = pd.merge(df_a, df_b, on=["phone", "millisSinceGpsEpoch"])
met_df = print_metric(df=df)
return met_df
def load_dataset(is_test: bool = True) -> Tuple[pd.DataFrame, pd.DataFrame]:
data_dir = Path(
get_original_cwd(), "../input/google-smartphone-decimeter-challenge"
)
fname = "test" if is_test else "train"
df =
|
pd.read_csv(data_dir / f"baseline_locations_{fname}.csv")
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import warnings
from dateutil.parser import parse
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = 'Times New Roman'
import seaborn as sns
sns.set_style('whitegrid')
### 1. Data cleaning
option_contract = pd.read_excel('option_contract.xlsx')
#### Load the option contract data,
# drop the Huatai-PineBridge (华泰柏瑞) records and the redundant columns 'kind', 'name', 'exercise_type'
## Drop the Huatai-PineBridge (华泰柏瑞) records
list_name = list(option_contract.name)
del_rows = [i for i in range(len(list_name)) if '华泰柏瑞' in list_name[i]]
option_contract_2 = option_contract.drop(del_rows)
## Drop the redundant columns 'kind', 'name', 'exercise_type'
option_contract_3 = option_contract_2.drop(['kind', 'name', 'exercise_type'] \
, axis=1)
#### Insert a column named 'ttm' for the remaining time to maturity, expressed in days,
# and keep only the option contracts with more than 30 days to maturity
## Insert a column named 'ttm'
option_contract_3['ttm'] = pd.Series(pd.to_datetime(option_contract_3['maturity_date']) \
- pd.to_datetime(option_contract_3['list_date']))
## Express it in days
option_contract_3['ttm'] = option_contract_3['ttm']. \
map(lambda x: x.days)
## Keep only the contracts with more than 30 days to maturity
df = option_contract_3.drop(option_contract_3[option_contract_3.ttm <= 30].index)
#### Drop the option contracts that mature after 2019,
# and store all remaining maturity_date values in a new container
## Build a new DataFrame holding all contracts that mature before 2020
df_2 = df.drop(df[df.maturity_date >= '2020-1-1'].index)
## Store all remaining maturity_date values in a new index, maturity_date_cleaned
maturity_date_cleaned = df_2.maturity_date.value_counts().sort_index().index
#### Build a new list, options, where each element holds all contracts sharing one maturity date
options = [df_2[df_2.maturity_date == i] for i in maturity_date_cleaned]
#### Read the price_start and price_end data:
# price_start holds the closing prices of all options on the first trading day of each month
# price_end holds the closing prices of all options on each month's expiry date
price_start = pd.read_excel('price_start.xlsx')
price_end = pd.read_excel('price_end.xlsx')
## Get the exact date of the first trading day of each month
start_date = price_start.trade_date.value_counts().sort_index().index
# Convert dates stored as int values into real datetime objects
price_start['Date_True'] = pd.Series([parse(str(y)) for y in list(price_start.trade_date)])
## Get the exact expiry date of each month
end_date = price_end.trade_date.value_counts().sort_index().index
# Convert dates stored as int values into real datetime objects
ls = pd.Series([parse(str(y)) for y in list(price_end.trade_date)])
price_end['Date_True'] = ls
#### Collect the underlying asset's closing price on every date in price_start and price_end,
# organise it into an Excel file and read it back in
ETF_start =
|
pd.read_excel('50ETF_Start.xlsx')
|
pandas.read_excel
|
# Import Module
import PyPDF2
from PyPDF2.utils import PdfReadError
import pdfx
from urlextract import URLExtract
import requests
import fitz
import click
import argparse
import os
from urllib.parse import urlparse, ParseResult
from fpdf import FPDF
import gspread
import pandas as pd
from gspread_dataframe import get_as_dataframe, set_with_dataframe
#import pdb;pdb.set_trace()
# Parse args
parser = argparse.ArgumentParser(description='Description of your program')
parser.add_argument('-p','--path', help='Localization of the files', default= "./CitationSaver/")
parser.add_argument('-d','--destination', help='Destination of the URLs extract', default= "./URLs/")
parser.add_argument('-a','--afterprocessed', help='Destination of the files processed', default= "./Processed/")
parser.add_argument('-w','--pathwarc', help='Destination of the WARCs for each file', default= "./WARCs/")
parser.add_argument('-j','--pathjson', help='Destination of the json file with google service key', default= "JSON")
parser.add_argument('-k','--key', help='Key Google Spreadsheet', default= "KEY")
parser.add_argument('-ws','--worksheet', help='Worksheet Google Spreadsheet', default= "WORKSHEET")
args = vars(parser.parse_args())
#Connect gspread
gc = gspread.service_account(filename=args['pathjson'])
sh = gc.open_by_key(args['key'])
worksheet = sh.worksheet(args['worksheet'])
#Transform worksheet to pandas dataframe
df = get_as_dataframe(worksheet)
#Global variable with the URLs check for each document
list_urls_check = []
# Extract URLs from text
def extract_url(text, list_urls):
extractor = URLExtract()
urls = extractor.find_urls(text)
for url in urls:
url = url.replace(",", "")
if "http" in url:
url = url[url.find('http'):]
if url not in list_urls:
list_urls.append(url)
# Check if the URLs is available
def check_url(scheme, netloc, path, url_parse, output):
url_parse = ParseResult(scheme, netloc, path, *url_parse[3:])
response = requests.head(url_parse.geturl())
if str(response.status_code).startswith("2") or str(response.status_code).startswith("3"):
output.write(url_parse.geturl()+"\n")
list_urls_check.append(url_parse.geturl())
else:
url_parse = ParseResult("https", netloc, path, *url_parse[3:])
response = requests.head(url_parse.geturl())
if str(response.status_code).startswith("2") or str(response.status_code).startswith("3"):
output.write(url_parse.geturl()+"\n")
list_urls_check.append(url_parse.geturl())
def check_pdf(file_name, file):
try:
pdf = PyPDF2.PdfFileReader(file_name)
return True
except PdfReadError:
return False
def extract_urls_pdf(file, file_name, list_urls):
#First method: PyPDF2
# Open File file
pdfFileObject = open(file_name, 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObject)
# Iterate through all pages
for page_number in range(pdfReader.numPages):
pageObject = pdfReader.getPage(page_number)
# Extract text from page
pdf_text = pageObject.extractText()
extract_url(pdf_text, list_urls)
if not list_urls:
#Update GoogleSheet
update_google_sheet(file, "", "", "", "Problem using PyPDF2 process", True)
# Close the PDF
pdfFileObject.close()
#Second method: PDFx
# Read PDF File
pdf = pdfx.PDFx(file_name)
# Get list of URL
json = pdf.get_references_as_dict()
if len(json) != 0:
for elem in json['url']:
if elem not in list_urls:
list_urls.append(elem)
else:
#Update GoogleSheet
update_google_sheet(file, "", "", "", "Problem using PDFx process", True)
#Third method: fitz
# Load PDF
with fitz.open(file_name) as doc:
text = ""
for page in doc:
text += page.getText().strip()#.replace("\n", "")
text = ' '.join(text.split())
extract_url(text, list_urls)
def check_urls(list_urls, output_file):
urls_to_google_sheet = []
if list_urls != []:
# Process the URLs
with open(output_file, 'w') as output:
# Remove mailto links
links = [url for url in list_urls if "mailto:" not in url]
for elem in links:
#Remove trash at the end of the URLs
if elem.endswith(";") or elem.endswith(".") or elem.endswith(")") or elem.endswith("/"):
elem = elem[:-1]
url_parse = urlparse(elem, 'http')
#URL parse
scheme = url_parse.scheme
netloc = url_parse.netloc or url_parse.path
path = url_parse.path if url_parse.netloc else ''
if not netloc.startswith('www.'):
netloc = 'www.' + netloc
try:
#Check if URL
check_url(scheme, netloc, path, url_parse, output)
except:
continue
#else:
#do something
def update_google_sheet(file, path_output, list_urls, list_urls_check, note, error):
#Get the index from the file being processed in the google sheet
index = df.index[df['File Name CitationSaver System']==file].tolist()
if not error:
#Check if columns are empty for the present row
if pd.isnull(df.at[index[0], 'Results URLs File Path']) and pd.isnull(df.at[index[0], 'Results URLs without check']) and
|
pd.isnull(df.at[index[0], 'Results URLs with check'])
|
pandas.isnull
|
# -*- coding: utf-8 -*-
"""
Get input data from Excel files, and calculate epidemiological parameters
"""
import os
import numpy as np
import pandas as pd
import datetime as dt
from . import param_parser
from .get_initial_state import InitialModelState
from datetime import datetime
def aggregate_params_and_data(yaml_fp):
"""Aggregates all run parameters. Reads from a config YAML file
at `yaml_fp`, and calls SEIR_get_data to retrieve demographic data.
Returns a dictionary of aggregated parameters.
"""
config = param_parser.load(yaml_fp, validate=False)
# -------------Get data/params from get_data/params ----------------
# handling of legacy param names, formatted as:
# [old name which is still supported, new name]
legacy_conversions = tuple([
['sd_date', 'c_reduction_date'],
['DATA_FOLDER', 'data_folder'],
['CITY', 'city'],
])
for conversion in legacy_conversions:
old_name = conversion[0]
new_name = conversion[1]
if new_name not in config:
assert old_name in config, "config YAML has no field " + \
"`{}` (formerly known as `{}`)".format(new_name, old_name)
config[new_name] = config[old_name]
# get demographics, school calendar, and transmission data from Excel files
AgeGroupDict, metro_pop, school_calendar, \
time_begin, FallStartDate, Phi, symp_h_ratio_overall, \
symp_h_ratio, hosp_f_ratio = SEIR_get_data(config=config)
config.update({
"AgeGroupDict": AgeGroupDict,
'metro_pop': metro_pop,
'school_calendar': school_calendar,
'time_begin': time_begin,
'FallStartDate': FallStartDate,
'phi': Phi,
#initial_state': config['initial_state'],
'initial_i': config['I0'],
'symp_h_ratio_overall': symp_h_ratio_overall,
'symp_h_ratio': symp_h_ratio,
'hosp_f_ratio': hosp_f_ratio
})
# -------------Get initial state of model --------------------------
## -- get initial state of compartments
# todo: SEIR model should take a new arg "init_type" that explicitly states whether to initialize every compartment or just infected
# todo: currently the type of initialization is inferred from the instance type of "initial_i" -- that is sure to break at some point
init_state = InitialModelState(config['total_time'], config['interval_per_day'], config['n_age'], config['n_risk'],
config['I0'], metro_pop)
compartments = init_state.initialize()
# todo: more graceful and transparent override of user config specified start date
# todo: perhaps in param_parser we can check that time_begin_sim is None if a I0 is a file path
if init_state.start_day:
print('Start date as specified in the config file is overridden by initialization from a deterministic solution.')
print('The new start date is {}'.format(init_state.start_day))
date_begin = init_state.start_day
config['time_begin_sim'] = datetime.strftime(date_begin, '%Y%m%d') # return datetime to its expected string format
# todo: we should re-save this config to reflect the updated start time
# ------------- Update config with revised initial conditions -------
config['initial_state'] = compartments
config['t_offset'] = init_state.offset
return config
def SEIR_get_data(config):
""" Gets input data from Excel files. Takes a configuration
dictionary `config` that must minimally contain the following keys:
:data_folder: str, path of Excel files
:city: str, name of city simulated
:n_age: int, number of age groups
:n_risk: int, number of risk groups
"""
# ingest from configuration dictionary
data_folder = config['data_folder']
city = config['city']
n_age = config['n_age']
n_risk = config['n_risk']
H_RELATIVE_RISK_IN_HIGH = config['H_RELATIVE_RISK_IN_HIGH']
D_RELATIVE_RISK_IN_HIGH = config['D_RELATIVE_RISK_IN_HIGH']
HIGH_RISK_RATIO = config['HIGH_RISK_RATIO']
H_FATALITY_RATIO = config['H_FATALITY_RATIO']
INFECTION_FATALITY_RATIO = config['INFECTION_FATALITY_RATIO']
OVERALL_H_RATIO = config['OVERALL_H_RATIO']
ASYMP_RATE = config['ASYMP_RATE']
age_group_dict = config['age_group_dict']
# ------------------------------
us_population_filename = 'US_pop_UN.csv'
population_filename = '{}_Population_{}_age_groups.csv'
population_filename_dict = {}
for key in age_group_dict.keys():
population_filename_dict[key] = population_filename.format(city, str(key))
school_calendar_filename = '{}_School_Calendar.csv'.format(city)
contact_matrix_all_filename_dict = {5: 'ContactMatrixAll_5AgeGroups.csv',
3: 'ContactMatrixAll_3AgeGroups.csv'}
contact_matrix_school_filename_dict = {5: 'ContactMatrixSchool_5AgeGroups.csv',
3: 'ContactMatrixSchool_3AgeGroups.csv'}
contact_matrix_work_filename_dict = {5: 'ContactMatrixWork_5AgeGroups.csv',
3: 'ContactMatrixWork_3AgeGroups.csv'}
contact_matrix_home_filename_dict = {5: 'ContactMatrixHome_5AgeGroups.csv',
3: 'ContactMatrixHome_3AgeGroups.csv'}
## Load data
# Population in US
df_US = pd.read_csv(data_folder + us_population_filename, index_col=False)
GroupPaperPop = df_US.groupby('GroupPaper')['Value'].sum().reset_index(name='GroupPaperPop')
GroupCOVIDPop = df_US.groupby('GroupCOVID')['Value'].sum().reset_index(name='GroupCOVIDPop')
df_US = pd.merge(df_US, GroupPaperPop)
df_US = pd.merge(df_US, GroupCOVIDPop)
# Calculate age specific and risk group specific symptomatic hospitalization ratio
df_US['Overall_H_Ratio'] = df_US['GroupPaper'].map(OVERALL_H_RATIO) / 100.
df_US['YHR_paper'] = df_US['Overall_H_Ratio'] / (1 - ASYMP_RATE)
df_US['YHN_1yr'] = df_US['YHR_paper'] * df_US['Value']
GroupCOVID_YHN = df_US.groupby('GroupCOVID')['YHN_1yr'].sum().reset_index(name='GroupCOVID_YHN')
df_US = pd.merge(df_US, GroupCOVID_YHN)
df_US['YHR'] = df_US['GroupCOVID_YHN'] / df_US['GroupCOVIDPop']
df_US['GroupCOVIDHighRiskRatio'] = df_US['GroupCOVID'].map(HIGH_RISK_RATIO) / 100.
df_US['YHR_low'] = df_US['YHR'] /(1 - df_US['GroupCOVIDHighRiskRatio'] + \
H_RELATIVE_RISK_IN_HIGH * df_US['GroupCOVIDHighRiskRatio'])
df_US['YHR_high'] = H_RELATIVE_RISK_IN_HIGH * df_US['YHR_low']
# Calculate age specific and risk group specific hospitalized fatality ratio
df_US['I_Fatality_Ratio'] = df_US['GroupPaper'].map(INFECTION_FATALITY_RATIO) / 100.
df_US['YFN_1yr'] = df_US['I_Fatality_Ratio'] * df_US['Value'] / (1 - ASYMP_RATE)
GroupCOVID_YFN = df_US.groupby('GroupCOVID')['YFN_1yr'].sum().reset_index(name='GroupCOVID_YFN')
df_US = pd.merge(df_US, GroupCOVID_YFN)
df_US['YFR'] = df_US['GroupCOVID_YFN'] / df_US['GroupCOVIDPop']
df_US['YFR_low'] = df_US['YFR'] / (1 - df_US['GroupCOVIDHighRiskRatio'] + \
D_RELATIVE_RISK_IN_HIGH * df_US['GroupCOVIDHighRiskRatio'])
df_US['YFR_high'] = D_RELATIVE_RISK_IN_HIGH * df_US['YFR_low']
df_US['HFR'] = df_US['YFR'] / df_US['YHR']
df_US['HFR_low'] = df_US['YFR_low'] / df_US['YHR_low']
df_US['HFR_high'] = df_US['YFR_high'] / df_US['YHR_high']
df_US_dict = df_US[['GroupCOVID', 'YHR', 'YHR_low', 'YHR_high', \
'HFR_low', 'HFR_high']].drop_duplicates().set_index('GroupCOVID').to_dict()
Symp_H_Ratio_dict = df_US_dict['YHR']
Symp_H_Ratio_L_dict = df_US_dict['YHR_low']
Symp_H_Ratio_H_dict = df_US_dict['YHR_high']
Hosp_F_Ratio_L_dict = df_US_dict['HFR_low']
Hosp_F_Ratio_H_dict = df_US_dict['HFR_high']
Symp_H_Ratio = np.array([Symp_H_Ratio_dict[i] for i in age_group_dict[n_age]])
Symp_H_Ratio_w_risk = np.array([[Symp_H_Ratio_L_dict[i] for i in age_group_dict[n_age]], \
[Symp_H_Ratio_H_dict[i] for i in age_group_dict[n_age]]])
Hosp_F_Ratio_w_risk = np.array([[Hosp_F_Ratio_L_dict[i] for i in age_group_dict[n_age]], \
[Hosp_F_Ratio_H_dict[i] for i in age_group_dict[n_age]]])
df = pd.read_csv(data_folder + population_filename_dict[n_age], index_col=False)
pop_metro = np.zeros(shape=(n_age, n_risk))
for r in range(n_risk):
pop_metro[:, r] = df.loc[df['RiskGroup'] == r, age_group_dict[n_age]].values.reshape(-1)
# Transmission adjustment multiplier per day and per metropolitan area
df_school_calendar = pd.read_csv(data_folder + school_calendar_filename, index_col=False)
school_calendar = df_school_calendar['Calendar'].values.reshape(-1)
school_calendar_start_date = dt.datetime.strptime(str(df_school_calendar['Date'][0]), '%m/%d/%y')
df_school_calendar_aug = df_school_calendar[df_school_calendar['Date'].str[0].astype(int) >= 8]
fall_start_date = df_school_calendar_aug[df_school_calendar_aug['Calendar'] == 1].Date.to_list()[0]
fall_start_date = '20200' + fall_start_date.split('/')[0] + fall_start_date.split('/')[1]
# Contact matrix
phi_all = pd.read_csv(data_folder + contact_matrix_all_filename_dict[n_age], header=None).values
phi_school =
|
pd.read_csv(data_folder + contact_matrix_school_filename_dict[n_age], header=None)
|
pandas.read_csv
|
from elasticsearch import Elasticsearch
import os
import pandas as pd
from typing import List, Dict, Callable, Any, Union, Tuple
from copy import deepcopy
import numpy as np
import re
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
ES_SERVER = None
HOST = "localhost"
PORT = "9200"
eINDEX = "phd"
eDOC = "lifelong"
def flatten_dict(dd, separator='.', prefix=''):
if isinstance(dd, dict):
new_d = {
prefix + separator + str(k) if prefix else str(k): v
for kk, vv in dd.items() for k, v in flatten_dict(vv, separator, str(kk)).items()
}
return new_d
elif isinstance(dd, list):
if len(dd) > 0:
if isinstance(dd[0], dict):
new_d = {
prefix + separator + str(k) if prefix else str(k): v
for kk, vv in enumerate(dd) for k, v in
flatten_dict(vv, separator, str(kk)).items()
}
return new_d
new_d = {prefix: dd}
return new_d
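# Illustrative example (added, not from the original source):
#   flatten_dict({"a": {"b": 1}, "c": [{"d": 2}]}) -> {"a.b": 1, "c.0.d": 2}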
def is_numeric(vv):
if vv is None:
return False
try:
a = float(vv)
except Exception as e:
return False
return True
def get_type_string(vv):
s = str(type(float))
return s.replace("<class '", "").replace("'>", "")
def flatten_dict_keys(dd, separator='.', prefix=''):
""" Transform complex data recursive to unique keys """
if isinstance(dd, dict):
all_k = set()
for kk, vv in dd.items():
k_name = "[_]" if is_numeric(kk) else kk
all_k.update(flatten_dict_keys(vv, separator=separator,
prefix=f"{prefix}{k_name}{separator}"))
return all_k
elif isinstance(dd, list):
if len(dd) > 0:
if isinstance(dd[0], dict):
all_k = set()
for vv in dd:
all_k.update(flatten_dict_keys(vv, separator=separator,
prefix=f"{prefix}[_]{separator}"))
return all_k
else:
return set([f"{prefix}[{get_type_string(dd)}]"])
return set([f"{prefix}<{get_type_string(dd)}>"])
else:
return set([f"{prefix}<{get_type_string(dd)}>"])
def get_complex_key_recursive(dd: Dict, key: List[str], sep: str = ".", sit: str = "[_]") -> Dict:
""" Get 1 complex key recursive """
if len(key) < 1:
return dd
if re.match("\[.*\]", key[0]):
if isinstance(dd, dict):
res = {}
for kk, vv in dd.items():
res[kk] = get_complex_key_recursive(vv, key[1:], sep=sep, sit=sit)
return res
else:
res = {}
for kk, vv in enumerate(dd):
res[kk] = get_complex_key_recursive(vv, key[1:], sep=sep, sit=sit)
return res
kk = key[0]
while kk not in dd and not re.match(r"\[.*\]", key[0]):
key = key[1:]
if len(key) > 0:
kk += sep + key[0]
else:
break
if kk not in dd:
return None
return {kk: get_complex_key_recursive(dd[kk], key[1:], sep=sep, sit=sit)}
def rem_complex_key_recursive(dd: Dict, key: List[str], sep: str = ".", sit: str = "[_]"):
""" Inplace Remove recursive complex key """
if re.match("\[.*\]", key[0]):
if isinstance(dd, dict):
for kk, vv in dd.items():
rem_complex_key_recursive(vv, key[1:], sep=sep, sit=sit)
else:
for kk, vv in enumerate(dd):
rem_complex_key_recursive(vv, key[1:], sep=sep, sit=sit)
kk = key[0]
while kk not in dd and not re.match(r"\[.*\]", key[0]):
key = key[1:]
if len(key) > 0:
kk += sep + key[0]
else:
break
if kk not in dd:
return
if len(key) > 1:
rem_complex_key_recursive(dd[kk], key[1:], sep=sep, sit=sit)
else:
dd.pop(kk)
def multi_index_df_to_dict(df, level=0) -> Dict:
if level > 0:
d = {}
it = df.index.levels[0] if hasattr(df.index, "levels") else df.index
for idx in it:
d[idx] = multi_index_df_to_dict(df.loc[idx], level=level-1)
return d
elif isinstance(df, pd.DataFrame):
d = {}
for idx, df_select in df.groupby(level=[0]):
d[idx] = df_select[0][0]
return d
else:
return df[0]
def exclude_dict_complex_keys(data: Dict, exclude_keys: List[str],
separator: str =".", siterator: str ="[_]") -> Dict:
""" Returns new dictionary without the specified complex keys """
data = deepcopy(data)
for key in exclude_keys:
key = key.split(".")
if key[-1].startswith("<") and key[-1].endswith(">"):
key = key[:-1]
rem_complex_key_recursive(data, key, sep=separator, sit=siterator)
return data
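# Illustrative example (added, not from the original source):
#   exclude_dict_complex_keys({"a": {"b": 1, "c": 2}}, ["a.b"]) -> {"a": {"c": 2}}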
def include_dict_complex_keys(data: Dict, include_keys: List[str],
smart_group: Union[int, List[int]] = 0,
separator: str =".", siterator: str ="[_]"):
""" get only included keys from dictionary. """
ret = {}
smart_groups = smart_group
if isinstance(smart_groups, list):
assert len(smart_groups) == len(include_keys), "Len of smart_group must equal include_keys"
else:
smart_groups = [smart_group] * len(include_keys)
for orig_key, smart_group in zip(include_keys, smart_groups):
key = orig_key.split(".")
if re.match("\[.*\]", key[-1]) or re.match("<.*>", key[-1]):
key = key[:-1]
key_data = get_complex_key_recursive(data, key, sep=separator, sit=siterator)
if smart_group > 0:
flat_data = flatten_dict(key_data)
if not np.any(flat_data.values()):
continue
df = pd.DataFrame([x.split(separator) for x in flat_data.keys()])
max_cl = df.columns.max()
df["values"] = flat_data.values()
df["common"] = ""
common = []
variable = []
for i in range(max_cl+1):
if len(df[i].unique()) == 1:
df["common"] += df[i] + separator
common.append(i)
else:
variable.append(i)
df = df.drop(common, axis=1)
for col in df.columns:
if is_numeric(df.loc[0, col]) and col != "values":
df.loc[:, col] = df[col].apply(lambda x: int(float(x)))
# Merge common columns
index_col = [df["common"].values] + [df[x].values for x in variable]
index = pd.MultiIndex.from_arrays(index_col, names=range(len(index_col)))
df_index = pd.DataFrame(df["values"].values, index=index)
# Only if smart group > 1 drop indexes
group = 1
index_level = len(index.levels) - 2
while group < smart_group and index_level >= 0:
index_tuple = []
values = []
for date, new_df in df_index.groupby(level=index_level):
values.append(new_df[0].values)
index_tuple.append(new_df.index.values[0][:-1])
index = pd.MultiIndex.from_tuples(index_tuple)
df_index = pd.DataFrame([0] * len(values), index=index)
df_index.loc[:, 0] =
|
pd.Series(values)
|
pandas.Series
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os.path
import unittest
import pandas as pd
import pandas.io.common
import biom
import skbio
import qiime2
from pandas.util.testing import assert_frame_equal, assert_series_equal
from q2_types.feature_table import BIOMV210Format
from q2_types.feature_data import (
TaxonomyFormat, HeaderlessTSVTaxonomyFormat, TSVTaxonomyFormat,
DNAFASTAFormat, DNAIterator, PairedDNAIterator,
PairedDNASequencesDirectoryFormat, AlignedDNAFASTAFormat,
DifferentialFormat, AlignedDNAIterator
)
from q2_types.feature_data._transformer import (
_taxonomy_formats_to_dataframe, _dataframe_to_tsv_taxonomy_format)
from qiime2.plugin.testing import TestPluginBase
# NOTE: these tests are fairly high-level and mainly test the transformer
# interfaces for the three taxonomy file formats. More in-depth testing for
# border cases, errors, etc. are in `TestTaxonomyFormatsToDataFrame` and
# `TestDataFrameToTSVTaxonomyFormat` below, which test the lower-level helper
# functions utilized by the transformers.
class TestTaxonomyFormatTransformers(TestPluginBase):
package = 'q2_types.feature_data.tests'
def test_taxonomy_format_to_dataframe_with_header(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.DataFrame([['k__Foo; p__Bar', '-1.0'],
['k__Foo; p__Baz', '-42.0']], index=index,
columns=['Taxon', 'Confidence'], dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.DataFrame,
filename=os.path.join('taxonomy', '3-column.tsv'))
assert_frame_equal(obs, exp)
def test_taxonomy_format_to_dataframe_without_header(self):
# Bug identified in https://github.com/qiime2/q2-types/issues/107
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
columns = ['Taxon', 'Unnamed Column 1', 'Unnamed Column 2']
exp = pd.DataFrame([['k__Foo; p__Bar', 'some', 'another'],
['k__Foo; p__Baz', 'column', 'column!']],
index=index, columns=columns, dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.DataFrame,
filename=os.path.join('taxonomy', 'headerless.tsv'))
assert_frame_equal(obs, exp)
def test_taxonomy_format_to_series_with_header(self):
index = pd.Index(['seq1', 'seq2'], name='Feature ID', dtype=object)
exp = pd.Series(['k__Foo; p__Bar', 'k__Foo; p__Baz'], index=index,
name='Taxon', dtype=object)
_, obs = self.transform_format(
TaxonomyFormat, pd.Series,
filename=os.path.join('taxonomy', '3-column.tsv'))
|
assert_series_equal(obs, exp)
|
pandas.util.testing.assert_series_equal
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Data Analysis IEEE-CIS Fraud Detection dataset.
(https://www.kaggle.com/c/ieee-fraud-detection).
############### TF Version: 1.13.1/Python Version: 3.7 ###############
"""
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
input_dir = os.getcwd() + "\\ieee-fraud-detection"
print(os.listdir(input_dir))
# import data [index_col specifies which column to use as the row index; read_csv returns a DataFrame]
train_tran = pd.read_csv(input_dir + "\\train_transaction.csv", index_col="TransactionID")
train_iden = pd.read_csv(input_dir + "\\train_identity.csv", index_col="TransactionID")
# tests_tran = pd.read_csv(input_dir + "\\test_transaction.csv", index_col="TransactionID")
# tests_iden = pd.read_csv(input_dir + "\\test_identity.csv", index_col="TransactionID")
train = train_tran.merge(train_iden, how="left", left_index=True, right_index=True)
# tests = tests_tran.merge(tests_iden, how="left", left_index=True, right_index=True)
plt_show = 0
if plt_show:
print(train.shape) # (590540, 433)
print(train.head(5))
# print(tests.shape) # (506691, 432)
# print(tests.head(5))
y_train = train["isFraud"].copy()
x_train = train.drop("isFraud", axis=1)
# x_tests = tests.copy()
plt_show = 1
if plt_show:
print(y_train.shape) # (590540,)
# print(y_train.head(5))
print(x_train.shape) # (590540, 432)
# print(x_train.head(5))
# print(x_tests.shape) # (506691, 432)
# =============================================================================
# =============================================================================
# explore data [describe single variables]
# Categorical => isFraud / ProductCD / DeviceType (Fig_1.png)
# isFraud ==> highly imbalanced [0: 569877, 1: 20663], positive-sample ratio around 3.5%
# isFraud is highly imbalanced [0/1]; ProductCD is imbalanced [W/H/C/S/R]
# DeviceType=desktop:mobile=86:56 [76% for null values]
# ProductCD: categories W/C have the most fraud samples; C/S have the highest fraud ratio
# DeviceType: mobile/desktop have similar fraud counts, but mobile has a higher fraud ratio
plt_show = 1
if plt_show:
isFraud_cnt = 0
if isFraud_cnt:
# count statistics for isFraud
train_feat = pd.DataFrame()
train_feat["isFraud"] = train["isFraud"]
feat1 = train_feat[train_feat["isFraud"] == 1]
feat2 = train_feat[train_feat["isFraud"] == 0]
print(train_feat.shape)
print(feat1.shape)
print(feat2.shape)
isFraud_cnt = 0
if isFraud_cnt:
# count statistics for ProductCD
train_feat =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 18:17:30 2015
@author: <NAME>
"""
import pandas
import numpy
import scipy.stats
import seaborn
import matplotlib.pyplot as plt
data = pandas.read_csv('gapminder.csv', low_memory=False)
# new code setting variables you will be working with to numeric
data['Alcoholuse'] = pandas.to_numeric(data['Alcoholuse'], errors='coerce')
data['Income'] = pandas.to_numeric(data['Income'], errors='coerce')
data['suicideper100th'] =
|
pandas.to_numeric(data['suicideper100th'], errors='coerce')
|
pandas.to_numeric
|
import demoDay21_recsys_music.hyj.gen_cf_data_hyj as gen
import demoDay21_recsys_music.hyj.config_hyj as conf
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
# show all columns
|
pd.set_option('display.max_columns', None)
|
pandas.set_option
|
import time
import os
import io
import json
import shutil
import zipfile
import pathlib
import pandas as pd
import boto3
import datetime
import botocore
from dateutil.parser import parse
s3 = boto3.client('s3')
lookoutmetrics_client = boto3.client( "lookoutmetrics")
def lambda_handler(event, context):
#Function to format the date given by the event
def datetime_from_string(s):
try:
dt = datetime.datetime.fromisoformat(s.split("[")[0])
except ValueError:
dt = datetime.datetime.strptime(s.split("[")[0], "%Y-%m-%dT%H:%MZ")
return dt
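# Illustrative example (added, not from the original source): the bracketed zone
# suffix is stripped before parsing, so "2021-05-01T00:00Z[UTC]" parses to 2021-05-01 00:00.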
#Function to update the metricValue_AnomalyScore csv in the case that one already exists
def update_Anomaly_CSV(event,key,bucket,obj,response):
print('object exist')
#Reading the existing file
original_df = pd.read_csv(obj.get("Body"), index_col=False)
file2 = original_df.to_dict('list')
#getting the needed data
metricList = response['MetricList']
dimensionList = response['DimensionList']
metricName = event['impactedMetric']['metricName']
#Column names generator
data2={}
data2['key']=[]
data2['Timestamp'] =[]
for i in dimensionList:
data2[i]=[]
# data2[i]=[]
for i in metricList:
data2[i['MetricName']+'AnomalyMetricValue']=[]
data2[i['MetricName']+'GroupScore']=[]
#Data collection from the event for the CSV
for i in event['impactedMetric']['relevantTimeSeries']:
for a in i['dimensions']:
data2[a['dimensionName']].append(a['dimensionValue'])
data2[metricName+'AnomalyMetricValue'].append(i['metricValue'])
data2[metricName+'GroupScore'].append(event['anomalyScore'])
data2['Timestamp'].append(start_time)
nRow=len(data2['Timestamp'])
nDimension = len(dimensionList)
#key generator
i=0
while i<nRow:
value=''
for a in dimensionList:
value+=str(data2[a][i])
value= str(data2['Timestamp'][i])+value
data2['key'].append(value)
i=i+1
c=0
#Check whether the data is already in the original file; amend existing rows and append new ones
for n in data2['key']:
if n in file2['key']:
where=file2['key'].index(n)
file2[metricName+'AnomalyMetricValue'][where] = data2[metricName+'AnomalyMetricValue'][c]
file2[metricName+'GroupScore'][where] =data2[metricName+'GroupScore'][c]
else:
file2['key'].append(data2['key'][c])
for i in dimensionList:
file2[i].append(data2[i][c])
file2[metricName+'AnomalyMetricValue'].append(data2[metricName+'AnomalyMetricValue'][c])
file2[metricName+'GroupScore'].append(data2[metricName+'GroupScore'][c])
file2['Timestamp'].append(dateTime)
c+=1
df = pd.DataFrame.from_dict(data=file2, orient='index')
df2 = df.transpose()
with io.StringIO() as filename:
df2.to_csv(filename, index=False, encoding='utf-8', date_format='%Y-%m-%d %H:%M:%S')
response = s3.put_object(
Bucket=bucket, Key=key, Body=filename.getvalue()
)
print('updated Anomaly csv saved')
#If the metricValue_AnomalyScore file does not exist it will create one
def generate_Anomaly_CSV(event,key,bucket,response):
#getting the needed data
metricList = response['MetricList']
dimensionList = response['DimensionList']
metricName = event['impactedMetric']['metricName']
pd.options.mode.use_inf_as_na = True
#Column names generator
data2={}
data2['key']=[]
data2['Timestamp'] =[]
for i in dimensionList:
data2[i]=[]
for i in metricList:
data2[i['MetricName']+'AnomalyMetricValue']=[]
data2[i['MetricName']+'GroupScore']=[]
#Data collection for the CSV
for i in event['impactedMetric']['relevantTimeSeries']:
for a in i['dimensions']:
data2[a['dimensionName']].append(a['dimensionValue'])
data2[metricName+'AnomalyMetricValue'].append(i['metricValue'])
data2[metricName+'GroupScore'].append(event['anomalyScore'])
data2['Timestamp'].append(start_time)
nRow=len(data2['Timestamp'])
#key generator
i=0
while i<nRow:
value=''
for a in dimensionList:
value+=str(data2[a][i])
value= str(data2['Timestamp'][i])+value
data2['key'].append(value)
i+=1
df = pd.DataFrame.from_dict(data=data2, orient='index')
df2 = df.transpose()
with io.StringIO() as filename:
df2.to_csv(filename, index=False, encoding='utf-8', date_format='%Y-%m-%d %H:%M:%S')
response = s3.put_object(
Bucket=bucket, Key=key, Body=filename.getvalue()
)
print('Anomaly csv saved in', key)
#Checks if the metricValue_AnomalyScore file already exists
def Anomaly_CSV_Check(event,key,bucket,response):
try:
obj = s3.get_object(Bucket=bucket,Key=key)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code']=='404' or e.response['Error']['Code']=='NoSuchKey':
print('the Anomaly csv file does not exist and we will generate the very first file now')
generate_Anomaly_CSV(event,key,bucket,response)
else:
print('something else happened')
print('error is', e.response)
raise
else:
update_Anomaly_CSV(event,key,bucket,obj,response)
#Updates the dimensionContributions csv file if it exists
def update_Dimension_CSV(event,key,obj,bucket):
print('object exist')
original_df = pd.read_csv(obj.get("Body"), index_col=False)
file = original_df.to_dict('list')
#Column Titles generation
data = {}
data ['Timestamp'] =[]
data['metricName'] =[]
data['dimensionName'] =[]
data['dimensionValue'] =[]
data['valueContribution'] =[]
#Data collection for the CSV
for i in event['impactedMetric']['dimensionContribution']:
for a in i['dimensionValueContributions']:
data['Timestamp'].append(start_time)
data['dimensionName'].append(i['dimensionName'])
data['dimensionValue'].append(a['dimensionValue'])
data['valueContribution'].append(a['valueContribution'])
data['metricName'].append(event['impactedMetric']['metricName'])
df=
|
pd.DataFrame(data=data)
|
pandas.DataFrame
|
from flask import Flask, jsonify, request, render_template, Blueprint
import logging
import pandas as pd
import sys
import json
import time
import boto3
import decouple
from io import StringIO
import urllib
from flaskext.markdown import Markdown
from flask_misaka import Misaka
logging.basicConfig(level=logging.INFO)
logger=logging.getLogger(__name__)
logger.info('Starting wormcells-de...')
flask_app = Flask(__name__)
# set proper loggin levels for gunicorn, taken from:
# https://medium.com/@trstringer/logging-flask-and-gunicorn-the-manageable-way-2e6f0b8beb2f
if __name__ != '__main__':
gunicorn_logger = logging.getLogger('gunicorn.error')
flask_app.logger.setLevel(gunicorn_logger.level)
Misaka(flask_app, math_explicit = True)
tables = Blueprint('tables', __name__, url_prefix='/tables')
# df with the number of cells of each label in each dataset
df = pd.read_csv(flask_app.open_resource('df.csv'))
# to render the table titles better we replace underscores with spaces,
# use non breaking hyphens (‑) and say batch1 instead of just 1
df_nice_names = df.copy()
df_nice_names.columns = df_nice_names.columns.str.replace('_',' ')
df_nice_names.columns = df_nice_names.columns.str.replace('cho-1 1','cho-1 batch1')
df_nice_names.columns = df_nice_names.columns.str.replace('cho-1 2','cho-1 batch2')
df_nice_names.columns = df_nice_names.columns.str.replace('unc-47 2','unc-47 batch2')
df_nice_names.columns = df_nice_names.columns.str.replace('unc-47 1','unc-47 batch1')
df_nice_names.columns = df_nice_names.columns.str.replace('-','‑')
# same for cell type names
# df_nice_names['Cell Type']= df_nice_names['Cell Type'].str.replace('_',' ')
# convert df to dict for sending as json to datatables
dict_df = df_nice_names.to_dict(orient='records')
# convert column names into dict for sending as json to datatables
columns = [{"data": item, "title": item} for item in df_nice_names.columns]
#### datatables ####
@tables.route("/", methods=['GET'])
def clientside_table_content():
return jsonify({'data': dict_df, 'columns': columns})
flask_app.register_blueprint(tables)
@flask_app.route("/")
def clientside_table():
return render_template("clientside_table.html")
####
@flask_app.route("/test")
def test():
return render_template("test.html")
# @flask_app.route("/")
# def index():
# logger.info('Got a request for index!')
# return render_template("index.html")
@flask_app.route('/submit', methods=['POST', 'GET'])
def receive_submission():
logger.info('Got a submission!')
# answer is a dict of json strings containing selected row and column index numbers
answer = request.form.to_dict(flat=False)
print(answer)
print(df.head())
#first try is in case submission is from table form
try:
# need to convert the json strings to dict, then to a data frame
# data1 is the selection for the first group, data2 for the second
data1 = json.loads(answer['data1'][0])
data1_df = pd.DataFrame.from_dict(data1[0])
print(data1_df)
data2 = json.loads(answer['data2'][0])
data2_df = pd.DataFrame.from_dict(data2[0])
# now map the index number to experiment name and cell type name
group1_df = pd.DataFrame()
group1_df['cell_type1'] = data1_df['row'].map(df['Cell Type'])
group1_df['experiment1'] = data1_df['column'].map(pd.Series(df.columns.values))
print(group1_df)
group2_df = pd.DataFrame()
group2_df['cell_type2'] = data2_df['row'].map(df['Cell Type'])
group2_df['experiment2'] = data2_df['column'].map(
|
pd.Series(df.columns.values)
|
pandas.Series
|
# ____ ____
# / /\/ /
# /___/ \ / Copyright (c) 2021, Xilinx®.
# \ \ \/ Author: <NAME> <<EMAIL>>
# \ \
# / /
# /___/ /\
# \ \ / \
# \___\/\___\
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tabnanny import verbose
from turtle import width
from launch import LaunchDescription
import bt2
import sys
import datetime
import os
from wasabi import color
from typing import List, Optional, Tuple, Union
import pandas as pd
import numpy as np
import pprint
from bokeh.plotting.figure import figure, Figure
from bokeh.plotting import output_notebook
from bokeh.io import show
from bokeh.layouts import row
from bokeh.models import ColumnDataSource, DatetimeTickFormatter, PrintfTickFormatter, Legend, Segment
from bokeh.models.annotations import Label
# color("{:02x}".format(x), fg=16, bg="green")
# debug = True # debug flag, set to True if desired
def get_change(first, second):
"""
Get change in percentage between two values
"""
if first == second:
return 0
try:
return (abs(first - second) / second) * 100.0
except ZeroDivisionError:
return float("inf")
def add_durations_to_figure(
figure: Figure,
segment_type: str,
durations: List[Tuple[float, float, float]],  # (begin_ms, end_ms, duration_ms)
color: str,
line_width: int = 60,
legend_label: Optional[str] = None,
) -> None:
for duration in durations:
duration_begin, duration_end, _ = duration
base_kwargs = dict()
if legend_label:
base_kwargs['legend_label'] = legend_label
figure.line(
x=[duration_begin, duration_end],
y=[segment_type, segment_type],
color=color,
line_width=line_width,
**base_kwargs,
)
def add_markers_to_figure(
figure: Figure,
segment_type: str,
times: List[datetime.datetime],
color: str,
line_width: int = 60,
legend_label: Optional[str] = None,
size: int = 30,
marker_type: str = 'diamond',
) -> None:
for time in times:
base_kwargs = dict()
if legend_label:
base_kwargs['legend_label'] = legend_label
if marker_type == 'diamond':
figure.diamond(
x=[time],
y=[segment_type],
fill_color=color,
line_color=color,
size=size,
**base_kwargs,
)
elif marker_type == 'plus':
figure.plus(
x=[time],
y=[segment_type],
fill_color=color,
line_color=color,
size=size,
**base_kwargs,
)
else:
assert False, 'invalid marker_type value'
def msgsets_from_trace(tracename):
global target_chain
# Create a trace collection message iterator from the first command-line
# argument.
msg_it = bt2.TraceCollectionMessageIterator(tracename)
# Iterate the trace messages and pick ros2 ones
image_pipeline_msgs = []
for msg in msg_it:
# `bt2._EventMessageConst` is the Python type of an event message.
if type(msg) is bt2._EventMessageConst:
# An event message holds a trace event.
event = msg.event
# Only check `sched_switch` events.
if ("ros2" in event.name):
image_pipeline_msgs.append(msg)
# Form sets with each pipeline
image_pipeline_msg_sets = []
new_set = [] # used to track new complete sets
chain_index = 0 # track where in the chain we are so far
vpid_chain = -1 # used to track a set and differentiate from other callbacks
# NOTE: NOT CODED FOR MULTIPLE NODES RUNNING CONCURRENTLY
# this classification will miss the initial matches because
# "ros2:callback_start" will not be associated with the target chain, and it keeps
# being considered until a "ros2:callback_end" of that particular process is seen
for index in range(len(image_pipeline_msgs)):
# first one
if chain_index == 0 and image_pipeline_msgs[index].event.name == target_chain[chain_index]:
new_set.append(image_pipeline_msgs[index])
vpid_chain = image_pipeline_msgs[index].event.common_context_field.get("vpid")
chain_index += 1
# print(color("Found: " + str(image_pipeline_msgs[index].event.name) + " - " + str([x.event.name for x in new_set]), fg="blue"))
# last one
elif image_pipeline_msgs[index].event.name == target_chain[chain_index] and target_chain[chain_index] == target_chain[-1] and \
new_set[-1].event.name == target_chain[-2] and \
image_pipeline_msgs[index].event.common_context_field.get("vpid") == vpid_chain:
new_set.append(image_pipeline_msgs[index])
image_pipeline_msg_sets.append(new_set)
# print(color("Found: " + str(image_pipeline_msgs[index].event.name) + " - " + str([x.event.name for x in new_set]), fg="blue"))
chain_index = 0 # restart
new_set = [] # restart
# match
elif image_pipeline_msgs[index].event.name == target_chain[chain_index] and \
image_pipeline_msgs[index].event.common_context_field.get("vpid") == vpid_chain:
new_set.append(image_pipeline_msgs[index])
chain_index += 1
# print(color("Found: " + str(image_pipeline_msgs[index].event.name), fg="green"))
# altered order
elif image_pipeline_msgs[index].event.name in target_chain and \
image_pipeline_msgs[index].event.common_context_field.get("vpid") == vpid_chain:
new_set.append(image_pipeline_msgs[index])
# print(color("Altered order: " + str([x.event.name for x in new_set]) + ", restarting", fg="red"))
chain_index = 0 # restart
new_set = [] # restart
return image_pipeline_msg_sets
def msgsets_from_trace_concurrent(tracename):
global target_chain
# NOTE: considered chains of "ros2:rclcpp_publish" roughly
# Create a trace collection message iterator from the first command-line
# argument.
msg_it = bt2.TraceCollectionMessageIterator(tracename)
# Iterate the trace messages and pick ros2 ones
image_pipeline_msgs = []
for msg in msg_it:
# `bt2._EventMessageConst` is the Python type of an event message.
if type(msg) is bt2._EventMessageConst:
# An event message holds a trace event.
event = msg.event
# Only check `sched_switch` events.
if ("ros2" in event.name):
image_pipeline_msgs.append(msg)
# Form sets with each pipeline
image_pipeline_msg_sets = []
candidates = {}  # dict keyed by vtid, holding the message lists considered as candidates for a complete set
# NOTE:
# - vpid remains the same for all Components in an executor, even if multithreaded
# - vtid changes for each component in a multithreaded executor
for trace in image_pipeline_msgs:
vtid = trace.event.common_context_field.get("vtid")
if trace.event.name == target_chain[0]:
if (vtid in candidates) and (candidates[vtid][-1].event.name == target_chain[-1]): # account for chained traces, use "ros2:callback_end"
# print(color("Continuing: " + str(trace.event.name), fg="green"))
candidates[vtid].append(trace)
elif vtid in candidates:
# print(color("Already a set, re-starting: " + str(trace.event.name) + " - " \
# + str([x.event.name for x in candidates[vtid]]) , fg="yellow"))
candidates[vtid] = [trace] # already a set existing (pop and) re-start
else:
candidates[vtid] = [trace] # new set
# print(color("New: " + str(trace.event.name) + " - " + \
# str([x.event.name for x in candidates[vtid]]), fg="blue"))
elif (trace.event.name in target_chain) and (vtid in candidates):
if len(candidates[vtid]) >= 9 and (trace.event.name in target_chain[9:]):
trace_index = target_chain[9:].index(trace.event.name) + 9
expected_index = target_chain[9:].index(candidates[vtid][-1].event.name) + 1 + 9
elif len(candidates[vtid]) >= 9:
# print(color("Skipping: " + str(trace.event.name), fg="yellow"))
continue # skip
else:
trace_index = target_chain.index(trace.event.name)
expected_index = target_chain.index(candidates[vtid][-1].event.name) + 1
# Account for chains of callbacks
if trace.event.name == target_chain[-1] and candidates[vtid][-1].event.name == target_chain[0]:
if len(candidates[vtid]) > 1:
candidates[vtid] = candidates[vtid][:-1] # pop last start and continue looking
# print(color("Chain of callbacks, popping: " + str(trace.event.name) , fg="yellow"))
else:
candidates.pop(vtid)
# print(color("Chain of callbacks while starting, popping: " + str(trace.event.name) , fg="yellow"))
elif trace_index == expected_index:
candidates[vtid].append(trace)
# print(color("Found: " + str(trace.event.name), fg="green"))
if trace.event.name == target_chain[-1] and candidates[vtid][-2].event.name == target_chain[-2] \
and len(candidates[vtid]) == len(target_chain): # last one
image_pipeline_msg_sets.append(candidates[vtid])
# print(color("complete set!", fg="pink"))
candidates.pop(vtid)
else:
if trace.event.name == "ros2:rclcpp_publish" or \
trace.event.name == "ros2:rcl_publish" or \
trace.event.name == "ros2:rmw_publish":
# print(color("Potential chain of publish: " + str(trace.event.name) + ", skipping" , fg="yellow"))
pass
else:
candidates[vtid].append(trace)
# print(color("Altered order: " + str([x.event.name for x in candidates[vtid]]) + ", discarding", fg="red"))
candidates.pop(vtid)
else:
# print(color("Skipped: " + str(trace.event.name), fg="grey"))
pass
return image_pipeline_msg_sets
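# Example usage (trace path is illustrative): each returned element is a list
# of bt2 event messages that matches `target_chain`, in order, for one
# iteration of the pipeline:
#   sets = msgsets_from_trace_concurrent(
#       str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize")
#   print(len(sets), "complete sets found")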
def barplot_all(image_pipeline_msg_sets, title="Barplot"):
global target_chain
global target_chain_dissambiguous
image_pipeline_msg_sets_ns = []
for set_index in range(len(image_pipeline_msg_sets)):
aux_set = []
target_chain_ns = []
for msg_index in range(len(image_pipeline_msg_sets[set_index])):
target_chain_ns.append(image_pipeline_msg_sets[set_index][msg_index].default_clock_snapshot.ns_from_origin)
init_ns = target_chain_ns[0]
for msg_index in range(len(image_pipeline_msg_sets[set_index])):
aux_set.append((target_chain_ns[msg_index] - init_ns)/1e6)
image_pipeline_msg_sets_ns.append(aux_set)
df = pd.DataFrame(image_pipeline_msg_sets_ns)
df.columns = target_chain_dissambiguous
import plotly.express as px
# pd.set_option("display.max_rows", None, "display.max_columns", None)
# print(df)
fig = px.box(
df,
points="all",
template="plotly_white",
title=title,
)
fig.update_xaxes(title_text = "Trace event")
fig.update_yaxes(title_text = "Milliseconds")
fig.show()
def traces(msg_set):
global target_chain_colors_fg_bokeh
global segment_types
global target_chain_marker
global target_chain
global target_chain_layer
fig = figure(
title='Image pipeline tracing',
x_axis_label=f'Milliseconds',
y_range=segment_types,
plot_width=2000,
plot_height=600,
)
fig.title.align = 'center'
fig.title.text_font_size = '20px'
# fig.xaxis[0].formatter = DatetimeTickFormatter(milliseconds = ['%3Nms'])
fig.xaxis[0].formatter = PrintfTickFormatter(format="%f ms")
fig.xaxis[0].ticker.desired_num_ticks = 20
fig.xaxis[0].axis_label_text_font_size = '30px'
fig.yaxis[0].major_label_text_font_size = '25px'
target_chain_ns = []
for msg_index in range(len(msg_set)):
target_chain_ns.append(msg_set[msg_index].default_clock_snapshot.ns_from_origin)
init_ns = target_chain_ns[0]
print("1")
# draw durations
## rclcpp callbacks - rectify
callback_start = (target_chain_ns[0] - init_ns)/1e6
callback_end = (target_chain_ns[8] - init_ns)/1e6
duration = callback_end - callback_start
add_durations_to_figure(
fig,
target_chain_layer[0],
[(callback_start, callback_start + duration, duration)],
'lightgray'
)
## rclcpp callbacks - resize
callback_start = (target_chain_ns[9] - init_ns)/1e6
callback_end = (target_chain_ns[17] - init_ns)/1e6
duration = callback_end - callback_start
add_durations_to_figure(
fig,
target_chain_layer[0],
[(callback_start, callback_start + duration, duration)],
'lightgray'
)
## rectify callback
callback_start = (target_chain_ns[1] - init_ns)/1e6
callback_end = (target_chain_ns[7] - init_ns)/1e6
duration = callback_end - callback_start
add_durations_to_figure(
fig,
target_chain_layer[1],
[(callback_start, callback_start + duration, duration)],
'whitesmoke'
)
## rectify op
callback_start = (target_chain_ns[2] - init_ns)/1e6
callback_end = (target_chain_ns[3] - init_ns)/1e6
duration = callback_end - callback_start
add_durations_to_figure(
fig,
target_chain_layer[1],
[(callback_start, callback_start + duration, duration)],
'seashell'
)
## resize callback
callback_start = (target_chain_ns[10] - init_ns)/1e6
callback_end = (target_chain_ns[16] - init_ns)/1e6
duration = callback_end - callback_start
add_durations_to_figure(
fig,
target_chain_layer[1],
[(callback_start, callback_start + duration, duration)],
'whitesmoke'
)
## resize op
callback_start = (target_chain_ns[11] - init_ns)/1e6
callback_end = (target_chain_ns[12] - init_ns)/1e6
duration = callback_end - callback_start
add_durations_to_figure(
fig,
target_chain_layer[1],
[(callback_start, callback_start + duration, duration)],
'seashell'
)
print("2")
for msg_index in range(len(msg_set)):
# add_markers_to_figure(fig, msg_set[msg_index].event.name, [(target_chain_ns[msg_index] - init_ns)/1e6], 'blue', marker_type='plus', legend_label='timing')
print("marker ms: " + str((target_chain_ns[msg_index] - init_ns)/1e6))
add_markers_to_figure(
fig,
target_chain_layer[msg_index],
[(target_chain_ns[msg_index] - init_ns)/1e6],
target_chain_colors_fg_bokeh[msg_index],
marker_type=target_chain_marker[msg_index],
# legend_label=msg_set[msg_index].event.name,
legend_label=target_chain_dissambiguous[msg_index],
size=10,
)
if "image_proc_resize_init" in msg_set[msg_index].event.name:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=0,
y_offset=-90,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
elif "image_proc_rectify_init" in msg_set[msg_index].event.name:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=0,
y_offset=-100,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
elif "image_proc_rectify_fini" in msg_set[msg_index].event.name:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=-60,
y_offset=-50,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
elif "image_proc_rectify_cb_fini" in msg_set[msg_index].event.name:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=-30,
y_offset=-50,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
elif "callback_start" in msg_set[msg_index].event.name:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=-30,
y_offset=-90,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
elif "image_proc_resize_fini" in msg_set[msg_index].event.name:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=20,
y_offset=-50,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
else:
label = Label(
x= (target_chain_ns[msg_index] - init_ns)/1e6,
y=target_chain_label_layer[msg_index],
x_offset=-30,
y_offset=-30,
text=target_chain_dissambiguous[msg_index].split(":")[-1]
)
fig.add_layout(label)
# hack legend to the right
fig.legend.location = "right"
new_legend = fig.legend[0]
fig.legend[0] = None
fig.add_layout(new_legend, 'right')
show(fig)
def barchart_data(image_pipeline_msg_sets):
"""Converts a tracing message list into its corresponding
relative (to the previous tracepoint) latency list in
millisecond units.
Args:
image_pipeline_msg_sets ([type]): [description]
Returns:
list: list of relative latencies, in ms
"""
image_pipeline_msg_sets_ns = []
for set_index in range(len(image_pipeline_msg_sets)):
aux_set = []
target_chain_ns = []
for msg_index in range(len(image_pipeline_msg_sets[set_index])):
target_chain_ns.append(image_pipeline_msg_sets[set_index][msg_index].default_clock_snapshot.ns_from_origin)
for msg_index in range(len(image_pipeline_msg_sets[set_index])):
if msg_index == 0:
previous = target_chain_ns[0]
else:
previous = target_chain_ns[msg_index - 1]
aux_set.append((target_chain_ns[msg_index] - previous)/1e6)
image_pipeline_msg_sets_ns.append(aux_set)
return image_pipeline_msg_sets_ns
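# Quick illustration of the conversion above (hypothetical timestamps, in ns):
#   [1_000_000_000, 1_000_400_000, 1_002_900_000]
# becomes, relative to the previous tracepoint and expressed in milliseconds:
#   [0.0, 0.4, 2.5]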
def print_timeline(image_pipeline_msg_sets):
global target_chain
global target_chain_colors_fg
for msg_set in image_pipeline_msg_sets:
if len(msg_set) != len(target_chain):
print(color("Not a complete set: " + str([x.event.name for x in msg_set]), fg="red"))
pass
else:
target_chain_ns = []
for msg_index in range(len(msg_set)):
target_chain_ns.append(msg_set[msg_index].default_clock_snapshot.ns_from_origin)
init_ns = target_chain_ns[0]
fixed_target_chain_ns = [init_ns] + target_chain_ns
# stringout = color("raw image → " + msg_set[0].event.name + " → ")
stringout = color("raw image ")
for msg_index in range(len(msg_set)):
stringout +=" → " + color(msg_set[msg_index].event.name + \
" ({} ms) ".format((fixed_target_chain_ns[msg_index + 1] - fixed_target_chain_ns[msg_index])/1e6),
fg=target_chain_colors_fg[msg_index], bg="black")
# stringout += " → " + msg_set[msg_index].event.name + \
# " ({} ms) ".format((fixed_target_chain_ns[msg_index + 1] - fixed_target_chain_ns[msg_index])/1e6)
stringout += color("total " + \
" ({} ms) ".format((target_chain_ns[-1] - target_chain_ns[0])/1e6), fg="black", bg="white")
print(stringout)
def rms(list):
return np.sqrt(np.mean(np.array(list)**2))
def mean(list):
return np.mean(np.array(list))
def max(list):
return np.max(np.array(list))
def min(list):
return np.min(np.array(list))
def rms_sets(image_pipeline_msg_sets, indices=None):
"""
Root-Mean-Square (RMS) (in the units provided) for a
given number of time trace sets.
NOTE: the sets should not include the aggregate total as their last value
:param: image_pipeline_msg_sets, list of lists, each containing the time traces
:param: indices, list of indices to consider on each set which will be summed
for rms. By default, sum of all values on each set.
"""
if indices:
with_indices_sets = []
for set in image_pipeline_msg_sets:
indices_sum = 0
for i in indices:
indices_sum += set[i]
with_indices_sets.append(indices_sum)
return rms(with_indices_sets)
else:
total_in_sets = [sum(set) for set in image_pipeline_msg_sets]
return rms(total_in_sets)
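# Illustration with made-up sets [[1, 2], [3, 4]] (values already in ms):
#   rms_sets(sets)      -> sqrt(((1+2)**2 + (3+4)**2) / 2) ≈ 5.39
#   rms_sets(sets, [0]) -> sqrt((1**2 + 3**2) / 2) ≈ 2.24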
def mean_sets(image_pipeline_msg_sets, indices=None):
if indices:
with_indices_sets = []
for set in image_pipeline_msg_sets:
indices_sum = 0
for i in indices:
indices_sum += set[i]
with_indices_sets.append(indices_sum)
return mean(with_indices_sets)
else:
total_in_sets = [sum(set) for set in image_pipeline_msg_sets]
return mean(total_in_sets)
def max_sets(image_pipeline_msg_sets, indices=None):
if indices:
with_indices_sets = []
for set in image_pipeline_msg_sets:
indices_sum = 0
for i in indices:
indices_sum += set[i]
with_indices_sets.append(indices_sum)
return max(with_indices_sets)
else:
total_in_sets = [sum(set) for set in image_pipeline_msg_sets]
return max(total_in_sets)
def min_sets(image_pipeline_msg_sets, indices=None):
if indices:
with_indices_sets = []
for set in image_pipeline_msg_sets:
indices_sum = 0
for i in indices:
indices_sum += set[i]
with_indices_sets.append(indices_sum)
return min(with_indices_sets)
else:
total_in_sets = [sum(set) for set in image_pipeline_msg_sets]
return min(total_in_sets)
def print_timeline_average(image_pipeline_msg_sets):
"""
Averaging first and then subtracting the previous average may produce negative numbers.
This is only useful to get an intuition of the totals.
"""
global target_chain
global target_chain_colors_fg
image_pipeline_msg_sets_ns = []
for msg_set in image_pipeline_msg_sets:
if len(msg_set) != len(target_chain):
print(color("Not a complete set: " + str([x.event.name for x in msg_set]), fg="red"))
pass
else:
target_chain_ns = []
final_target_chain_ns = []
for msg_index in range(len(msg_set)):
target_chain_ns.append(msg_set[msg_index].default_clock_snapshot.ns_from_origin)
init_ns = target_chain_ns[0]
fixed_target_chain_ns = [init_ns] + target_chain_ns
for msg_index in range(len(msg_set)):
final_target_chain_ns.append((fixed_target_chain_ns[msg_index + 1] - fixed_target_chain_ns[msg_index]))
final_target_chain_ns.append((fixed_target_chain_ns[-1] - fixed_target_chain_ns[0])) # total
image_pipeline_msg_sets_ns.append(final_target_chain_ns)
image_pipeline_msg_ns_average = [sum(x) / len(x) for x in zip(*image_pipeline_msg_sets_ns)]
# print(image_pipeline_msg_ns_average)
stringout = color("raw image ")
for msg_index in range(len(image_pipeline_msg_ns_average[:-1])):
stringout +=" → " + color(image_pipeline_msg_sets[0][msg_index].event.name + \
" ({} ms) ".format((image_pipeline_msg_ns_average[msg_index + 1] - image_pipeline_msg_ns_average[msg_index])/1e6),
fg=target_chain_colors_fg[msg_index], bg="black")
stringout += color("total " + \
" ({} ms) ".format((image_pipeline_msg_ns_average[-1] - image_pipeline_msg_ns_average[0])/1e6), fg="black", bg="white")
print(stringout)
def statistics(image_pipeline_msg_sets_ms, verbose=False):
global target_chain_dissambiguous
mean_ = mean_sets(image_pipeline_msg_sets_ms)
rms_ = rms_sets(image_pipeline_msg_sets_ms)
min_ = min_sets(image_pipeline_msg_sets_ms)
max_ = max_sets(image_pipeline_msg_sets_ms)
mean_accelerators = mean_sets(image_pipeline_msg_sets_ms,
[
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_rectify_fini"),
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_resize_fini"),
]
)
rms_accelerators = rms_sets(image_pipeline_msg_sets_ms,
[
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_rectify_fini"),
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_resize_fini"),
]
)
max_accelerators = max_sets(image_pipeline_msg_sets_ms,
[
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_rectify_fini"),
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_resize_fini"),
]
)
min_accelerators = min_sets(image_pipeline_msg_sets_ms,
[
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_rectify_fini"),
target_chain_dissambiguous.index("ros2_image_pipeline:image_proc_resize_fini"),
]
)
if verbose:
print(color("mean: " + str(mean_), fg="yellow"))
print("rms: " + str(rms_))
print("min: " + str(min_))
print(color("max: " + str(max_), fg="red"))
print(color("mean accelerators: " + str(mean_accelerators), fg="yellow"))
print("rms accelerators: " + str(rms_accelerators))
print("min accelerators: " + str(min_accelerators))
print(color("max accelerators: " + str(max_accelerators), fg="red"))
return [mean_accelerators, rms_accelerators, max_accelerators, min_accelerators, mean_, rms_, max_, min_]
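# Note: the list returned by statistics() is ordered as
#   [mean_accel, rms_accel, max_accel, min_accel, mean, rms, max, min],
# which is the column order assumed by the markdown header built in table() below.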
def table(list_sets, list_sets_names):
"""
Creates a markdown table from a list of sets
NOTE: assumes base is always the first set in list_sets, which
is then used to calculate the % change.
"""
list_statistics = []
# generate statistics
for sets in list_sets:
list_statistics.append(statistics(sets))
# Add name to each statistics list
for stat_list_index in range(len(list_statistics)):
list_statistics[stat_list_index].insert(0, list_sets_names[stat_list_index])
# add headers
list_statistics.insert(0, ["---", "---", "---", "---", "---", "---", "---", "---", "---",])
list_statistics.insert(0, [
" ", "Accel. Mean", "Accel. RMS",
"Accel. Max ", "Accel. Min", "Mean",
"RMS", "Max", "Min"])
baseline = list_statistics[2] # baseline for %
length_list = [len(row) for row in list_statistics]
column_width = max(length_list)
count = 0
for row in list_statistics:
row_str = " | "
if count == 2:
for element_index in range(len(row)):
if type(row[element_index]) != str:
if row[element_index] > baseline[element_index]:
row_str += "**{:.2f}** ms".format(row[element_index]) + " (:small_red_triangle_down: `" \
+ "{:.2f}".format(get_change(row[element_index], baseline[element_index])) + "`%) | "
else:
row_str += "**{:.2f}** ms".format(row[element_index]) + " (`" \
+ "{:.2f}".format(get_change(row[element_index], baseline[element_index])) + "`%) | "
else:
row_str += row[element_index] + " | "
else:
for element_index in range(len(row)):
if type(row[element_index]) != str:
if row[element_index] > baseline[element_index]:
row_str += "{:.2f} ms".format(row[element_index]) + " (:small_red_triangle_down: `" \
+ "{:.2f}".format(get_change(row[element_index], baseline[element_index])) + "`%) | "
else:
row_str += "{:.2f} ms".format(row[element_index]) + " (`" \
+ "{:.2f}".format(get_change(row[element_index], baseline[element_index])) + "`%) | "
else:
row_str += row[element_index] + " | "
count += 1
print(row_str)
# if count == 2:
# row = "|" + "|".join("**{:.2f}** ms".format(row[element_index]) + " (`"
# + "{:.2f}".format(get_change(row[element_index], baseline[element_index])) + "`%)"
# if type(row[element_index]) != str
# else row[element_index]
# for element_index in range(len(row))) + "|"
# else:
# row = "|" + "|".join("{:.2f} ms".format(row[element_index]) + " (`"
# + "{:.2f}".format(get_change(row[element_index], baseline[element_index])) + "`%)"
# if type(row[element_index]) != str else row[element_index]
# for element_index in range(len(row))) + "|"
# count += 1
# print(row)
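# Example call (sets and labels as defined further below); the first entry is
# the baseline against which the percentage change of every other row is computed:
#   table([image_pipeline_msg_sets_ms_cpu, image_pipeline_msg_sets_ms_fpga],
#         ["CPU **baseline**", "FPGA @ 250 MHz"])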
def generate_launch_description():
return LaunchDescription()
##############################
##############################
# targeted chain of messages for tracing
target_chain = [
"ros2:callback_start",
"ros2_image_pipeline:image_proc_rectify_cb_init",
"ros2_image_pipeline:image_proc_rectify_init",
"ros2_image_pipeline:image_proc_rectify_fini",
"ros2:rclcpp_publish",
"ros2:rcl_publish",
"ros2:rmw_publish",
"ros2_image_pipeline:image_proc_rectify_cb_fini",
"ros2:callback_end",
"ros2:callback_start",
"ros2_image_pipeline:image_proc_resize_cb_init",
"ros2_image_pipeline:image_proc_resize_init",
"ros2_image_pipeline:image_proc_resize_fini",
"ros2:rclcpp_publish",
"ros2:rcl_publish",
"ros2:rmw_publish",
"ros2_image_pipeline:image_proc_resize_cb_fini",
"ros2:callback_end",
]
target_chain_dissambiguous = [
"ros2:callback_start",
"ros2_image_pipeline:image_proc_rectify_cb_init",
"ros2_image_pipeline:image_proc_rectify_init",
"ros2_image_pipeline:image_proc_rectify_fini",
"ros2:rclcpp_publish",
"ros2:rcl_publish",
"ros2:rmw_publish",
"ros2_image_pipeline:image_proc_rectify_cb_fini",
"ros2:callback_end",
"ros2:callback_start (2)",
"ros2_image_pipeline:image_proc_resize_cb_init",
"ros2_image_pipeline:image_proc_resize_init",
"ros2_image_pipeline:image_proc_resize_fini",
"ros2:rclcpp_publish (2)",
"ros2:rcl_publish (2)",
"ros2:rmw_publish (2)",
"ros2_image_pipeline:image_proc_resize_cb_fini",
"ros2:callback_end (2)",
]
target_chain_colors_fg = [
"blue",
"yellow",
"red",
"red",
"blue",
"blue",
"blue",
"yellow",
"blue",
"blue",
"yellow",
"red",
"red",
"blue",
"blue",
"blue",
"yellow",
"blue",
]
# target_chain_colors_fg_bokeh = [
# "lightgray",
# "silver",
# "darkgray",
# "gray",
# "dimgray",
# "lightslategray",
# "slategray",
# "darkslategray",
# "black",
# "burlywood",
# "tan",
# "rosybrown",
# "sandybrown",
# "goldenrod",
# "darkgoldenrod",
# "peru",
# "chocolate",
# "saddlebrown",
# # "blue",
# # "blueviolet",
# # "brown",
# # "burlywood",
# # "cadetblue",
# # "chartreuse",
# # "chocolate",
# # "coral",
# # "cornflowerblue",
# ]
target_chain_colors_fg_bokeh = [
"lightsalmon",
"salmon",
"darksalmon",
"lightcoral",
"indianred",
"crimson",
"firebrick",
"darkred",
"red",
"lavender",
"thistle",
"plum",
"fuchsia",
"mediumorchid",
"mediumpurple",
"darkmagenta",
"indigo",
"mediumslateblue",
]
target_chain_layer = [
"rclcpp",
"userland",
"userland",
"userland",
"rclcpp",
"rcl",
"rmw",
"userland",
"rclcpp",
"rclcpp",
"userland",
"userland",
"userland",
"rclcpp",
"rcl",
"rmw",
"userland",
"rclcpp",
]
target_chain_label_layer = [ # associated with the layer
3,
4,
4,
4,
3,
2,
1,
4,
3,
3,
4,
4,
4,
3,
2,
1,
4,
3,
]
target_chain_marker = [
"diamond",
"plus",
"plus",
"plus",
"plus",
"plus",
"plus",
"plus",
"diamond",
"diamond",
"plus",
"plus",
"plus",
"plus",
"plus",
"plus",
"plus",
"diamond",
]
# For some reason it seems to be displayed in the reverse order on the Y axis
segment_types = [
"rmw",
"rcl",
"rclcpp",
"userland"
]
# # ####################
# # print timing pipeline
# # ####################
# # image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_fpga")
# # # print(len(image_pipeline_msg_sets))
# # # print_timeline(image_pipeline_msg_sets) # all timelines
# # print_timeline([image_pipeline_msg_sets[-1]]) # timeline of last message
# # # print_timeline_average(image_pipeline_msg_sets) # timeline of averages, NOTE only totals are of interest
# target_chain = [
# "ros2:callback_start",
# "ros2_image_pipeline:image_proc_rectify_cb_init",
# "ros2_image_pipeline:image_proc_rectify_init",
# "ros2_image_pipeline:image_proc_rectify_fini",
# "ros2:rclcpp_publish",
# "ros2:rcl_publish",
# "ros2:rmw_publish",
# "ros2_image_pipeline:image_proc_rectify_cb_fini",
# "ros2:callback_end",
# ]
# target_chain_colors_fg = [
# "blue",
# "yellow",
# "red",
# "red",
# "blue",
# "blue",
# "blue",
# "yellow",
# "blue",
# ]
# # image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_fpga_integrated")
# image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_fpga_integrated_250_node")
# # print(len(image_pipeline_msg_sets))
# # print_timeline(image_pipeline_msg_sets) # all timelines
# # print_timeline([image_pipeline_msg_sets[-1]]) # timeline of last message
# print_timeline(image_pipeline_msg_sets[-10:]) # timeline of last 10 messages
# # print_timeline_average(image_pipeline_msg_sets) # timeline of averages, NOTE only totals are of interest
######################
# draw tracepoints
######################
# image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize")
# msg_set = image_pipeline_msg_sets[-1]
# traces(msg_set)
# ######################
# # draw barplot all data
# ######################
# # # NOTE: Discard first few
# image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize")
# barplot_all(image_pipeline_msg_sets[10:], title="image_pipeline in CPU")
# image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_fpga")
# barplot_all(image_pipeline_msg_sets[10:], title="image_pipeline in FPGA")
# # image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_stress")
# # barplot_all(image_pipeline_msg_sets[10:], title="image_pipeline in CPU and with stress")
# # image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_fpga_stress")
# # barplot_all(image_pipeline_msg_sets[10:], title="image_pipeline in FPGA and with stress")
# target_chain = [
# "ros2:callback_start",
# "ros2_image_pipeline:image_proc_rectify_cb_init",
# "ros2_image_pipeline:image_proc_rectify_init",
# "ros2_image_pipeline:image_proc_rectify_fini",
# "ros2:rclcpp_publish",
# "ros2:rcl_publish",
# "ros2:rmw_publish",
# "ros2_image_pipeline:image_proc_rectify_cb_fini",
# "ros2:callback_end",
# # "ros2:callback_start",
# # "ros2_image_pipeline:image_proc_resize_cb_init",
# # "ros2_image_pipeline:image_proc_resize_init",
# # "ros2_image_pipeline:image_proc_resize_fini",
# # "ros2:rclcpp_publish",
# # "ros2:rcl_publish",
# # "ros2:rmw_publish",
# # "ros2_image_pipeline:image_proc_resize_cb_fini",
# # "ros2:callback_end",
# ]
# target_chain_dissambiguous = target_chain
# target_chain_colors_fg = [
# "blue",
# "yellow",
# "red",
# "red",
# "blue",
# "blue",
# "blue",
# "yellow",
# "blue",
# # "blue",
# # "yellow",
# # "red",
# # "red",
# # "blue",
# # "blue",
# # "blue",
# # "yellow",
# # "blue",
# ]
# image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_rectify_resize_fpga_integrated")
# barplot_all(image_pipeline_msg_sets[10:], title="image_pipeline integrated @ 250 MHz in FPGA")
# target_chain = [
# "ros2:callback_start", "ros2_image_pipeline:image_proc_resize_cb_init",
# "ros2_image_pipeline:image_proc_resize_init", "ros2_image_pipeline:image_proc_resize_fini",
# "ros2:rclcpp_publish", "ros2:rcl_publish", "ros2:rmw_publish",
# "ros2_image_pipeline:image_proc_resize_cb_fini", "ros2:callback_end",
# ]
# target_chain_dissambiguous = target_chain
# image_pipeline_msg_sets = msgsets_from_trace_concurrent(str(os.environ["HOME"]) + "/.ros/tracing/trace_test2")
# barplot_all(image_pipeline_msg_sets[10:], title="image_pipeline, streams @ 250 MHz in FPGA")
# ######################
# # draw bar charts
# ######################
#///////////////////
# Data sources
#///////////////////
# # NOTE: Discard first few
discard_count = 10
image_pipeline_msg_sets_ms_cpu = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize")[discard_count:])
image_pipeline_msg_sets_ms_fpga = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga")[discard_count:])
# image_pipeline_msg_sets_ms_fpga_streamlined = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_streamlined")[discard_count:])
# image_pipeline_msg_sets_ms_fpga_streamlined_xrt = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_streamlined_xrt")[discard_count:])
image_pipeline_msg_sets_ms_cpu_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_stress")[discard_count:])
image_pipeline_msg_sets_ms_fpga_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_stress")[discard_count:])
target_chain = [
"ros2:callback_start", "ros2_image_pipeline:image_proc_rectify_cb_init",
"ros2_image_pipeline:image_proc_rectify_init", "ros2_image_pipeline:image_proc_rectify_fini",
"ros2:rclcpp_publish", "ros2:rcl_publish", "ros2:rmw_publish",
"ros2_image_pipeline:image_proc_rectify_cb_fini", "ros2:callback_end",
]
image_pipeline_msg_sets_ms_fpga_integrated = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated)):
image_pipeline_msg_sets_ms_fpga_integrated[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
image_pipeline_msg_sets_ms_fpga_integrated_xrt = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated_xrt")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_xrt)):
image_pipeline_msg_sets_ms_fpga_integrated_xrt[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
image_pipeline_msg_sets_ms_fpga_integrated_streamlined = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated_streamlined")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_streamlined)):
image_pipeline_msg_sets_ms_fpga_integrated_streamlined[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated_streamlined_xrt")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt)):
image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# stress
image_pipeline_msg_sets_ms_fpga_integrated_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated_stress")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_stress)):
image_pipeline_msg_sets_ms_fpga_integrated_stress[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
image_pipeline_msg_sets_ms_fpga_integrated_xrt_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated_xrt_stress")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_xrt_stress)):
image_pipeline_msg_sets_ms_fpga_integrated_xrt_stress[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_integrated_streamlined_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_streamlined_stress")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_streamlined_stress)):
# image_pipeline_msg_sets_ms_fpga_integrated_streamlined_stress[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_integrated_streamlined_xrt_stress")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt_stress)):
image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt_stress[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
target_chain = [
"ros2:callback_start", "ros2_image_pipeline:image_proc_resize_cb_init",
"ros2_image_pipeline:image_proc_resize_init", "ros2_image_pipeline:image_proc_resize_fini",
"ros2:rclcpp_publish", "ros2:rcl_publish", "ros2:rmw_publish",
"ros2_image_pipeline:image_proc_resize_cb_fini", "ros2:callback_end",
]
image_pipeline_msg_sets_ms_fpga_streamlined = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_streamlined")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_streamlined)):
image_pipeline_msg_sets_ms_fpga_streamlined[i_set] = [0, 0, 0, 0, 0, 0, 0, 0, 0] + image_pipeline_msg_sets_ms_fpga_streamlined[i_set]
image_pipeline_msg_sets_ms_fpga_streamlined_xrt = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
+ "/.ros/tracing/trace_rectify_resize_fpga_streamlined_xrt")[discard_count:])
# fix data of "*_integrated" to align with dimensions of the rest
for i_set in range(len(image_pipeline_msg_sets_ms_fpga_streamlined_xrt)):
image_pipeline_msg_sets_ms_fpga_streamlined_xrt[i_set] = [0, 0, 0, 0, 0, 0, 0, 0, 0] + image_pipeline_msg_sets_ms_fpga_streamlined_xrt[i_set]
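# A possible refactor (sketch only, not used above): the repeated padding loops
# could be folded into a helper that pads every set to the full 18-tracepoint
# chain length. The name and signature below are illustrative.
def _pad_sets(sets, pad=9, prepend=False):
    """Pad each per-set latency list with `pad` zeros, before or after the data."""
    filler = [0] * pad
    return [filler + s if prepend else s + filler for s in sets]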
# image_pipeline_msg_sets_ms_fpga_integrated = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated)):
# image_pipeline_msg_sets_ms_fpga_integrated[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_integrated_200 = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_200")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_200)):
# image_pipeline_msg_sets_ms_fpga_integrated_200[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_integrated_250 = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_250")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_250)):
# image_pipeline_msg_sets_ms_fpga_integrated_250[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_integrated_250_stress = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_250_stress")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_250_stress)):
# image_pipeline_msg_sets_ms_fpga_integrated_250_stress[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_integrated_250_xrt = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_250_xrt")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_integrated_250_xrt)):
# image_pipeline_msg_sets_ms_fpga_integrated_250_xrt[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_streamlined = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_streamlined")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_streamlined)):
# image_pipeline_msg_sets_ms_fpga_streamlined[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# image_pipeline_msg_sets_ms_fpga_streamlined_xrt = barchart_data(msgsets_from_trace_concurrent(str(os.environ["HOME"]) \
# + "/.ros/tracing/trace_rectify_resize_fpga_integrated_streamlined_xrt")[discard_count:])
# # fix data of "*_integrated" to align with dimensions of the rest
# for i_set in range(len(image_pipeline_msg_sets_ms_fpga_streamlined_xrt)):
# image_pipeline_msg_sets_ms_fpga_streamlined_xrt[i_set] += [0, 0, 0, 0, 0, 0, 0, 0, 0]
# #///////////////////
# # Markdown Table results
# #///////////////////
# table(
# [
# # full pipeline
# image_pipeline_msg_sets_ms_cpu,
# image_pipeline_msg_sets_ms_fpga,
# # # integrated
# image_pipeline_msg_sets_ms_fpga_integrated,
# # image_pipeline_msg_sets_ms_fpga_integrated_xrt,
# # streamlined
# image_pipeline_msg_sets_ms_fpga_streamlined,
# image_pipeline_msg_sets_ms_fpga_streamlined_xrt,
# # # integrated, streamlined
# # image_pipeline_msg_sets_ms_fpga_integrated_streamlined,
# # image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt,
# #
# # # full pipeline stress
# # image_pipeline_msg_sets_ms_cpu,
# # image_pipeline_msg_sets_ms_fpga,
# # # image_pipeline_msg_sets_ms_fpga_streamlined,
# # # image_pipeline_msg_sets_ms_fpga_streamlined_xrt,
# # # integrated stress
# # image_pipeline_msg_sets_ms_fpga_integrated,
# # image_pipeline_msg_sets_ms_fpga_integrated_xrt,
# # # integrated, streamlined stress
# # # image_pipeline_msg_sets_ms_fpga_integrated_streamlined,
# # image_pipeline_msg_sets_ms_fpga_integrated_streamlined_xrt,
# ],
# [
# # full pipeline
# "CPU **baseline**",
# "FPGA @ 250 MHz",
# # # integrated
# "FPGA, integrated @ 250 MHz",
# # "FPGA, integrated, XRT @ 250 MHz",
# # streamlined
# "FPGA, streams (resize) @ 250 MHz",
# "FPGA, streams (resize), XRT @ 250 MHz",
# # # integrated, streamlined
# # "FPGA, integrated, streams @ 250 MHz",
# # "FPGA, integrated, streams, XRT @ 250 MHz",
# #
# # # full pipeline stress
# # "CPU **baseline**",
# # "FPGA @ 250 MHz",
# # # "FPGA, streams @ 250 MHz",
# # # "FPGA, streams, XRT @ 250 MHz",
# # # integrated stress
# # "FPGA, integrated @ 250 MHz",
# # "FPGA, integrated, XRT @ 250 MHz",
# # # integrated, streamlined stress
# # # "FPGA, integrated, streams @ 250 MHz",
# # "FPGA, integrated, streams, XRT @ 250 MHz",
# ]
# )
#///////////////////
# Plot, either averages or latest, etc
#///////////////////
# # plot latest values
# df_cpu = pd.DataFrame(image_pipeline_msg_sets_ms_cpu[-1:]) # pick the latest one
# df_fpga = pd.DataFrame(image_pipeline_msg_sets_ms_fpga[-1:]) # pick the latest one
# df = pd.concat([df_cpu, df_fpga], ignore_index=True)
# df.columns = target_chain_dissambiguous
# substrates = pd.DataFrame({'substrate': ["CPU","FPGA"]})
# df = df.join(substrates)
# plot averages
df_cpu_mean = pd.DataFrame(image_pipeline_msg_sets_ms_cpu)
from millify import millify
import altair as alt
import pandas as pd
import streamlit as st
from pandas.tseries import offsets
from urllib.parse import urlparse
from . import utils
import streamlit.components.v1 as components
### Summary stats from coingecko
def get_cg_summary_data(coin_choice, df):
score_cols = [
"coingecko_score",
"developer_score",
"community_score",
"liquidity_score",
"public_interest_score",
]
coin_choice_df = df.loc[df.name == coin_choice]
genesis_date = coin_choice_df["genesis_date"].values[0]
last_updated = coin_choice_df["last_updated"].values[0]
contract_address = coin_choice_df["contract_address"].values[0]
coingecko_rank = coin_choice_df["coingecko_rank"].values[0]
market_cap_rank = coin_choice_df["market_cap_rank"].values[0]
sentiment_votes_up_percentage = coin_choice_df[
"sentiment_votes_up_percentage"
].values[0]
sentiment_votes_down_percentage = coin_choice_df[
"sentiment_votes_down_percentage"
].values[0]
# st.markdown("## Market Cap Rank")
st.metric(
label="Market Cap Rank",
value=f"#{market_cap_rank}",
)
st.metric(
label="CoinGecko Rank",
value=f"#{coingecko_rank}",
)
# st.markdown(
# f"<h1>Market Cap Rank #{market_cap_rank}</h1><h1>CoinGecko Rank #{coingecko_rank}</h1>",
# unsafe_allow_html=True,
# )
get_market_data(coin_choice, df)
st.markdown(
f'<h1>CoinGecko Sentiment<br><span style="color: green;">{sentiment_votes_up_percentage}%</span> <span style="color: red;"> {sentiment_votes_down_percentage}%</span></h1>',
unsafe_allow_html=True,
)
for col in score_cols:
st.markdown(
f"<p class='small-font'><strong>{col.replace('_', ' ').capitalize()}</strong>: {coin_choice_df[col].values[0]:.2f}%</p>", # noqa: E501
unsafe_allow_html=True,
)
if not pd.isna(coin_choice_df["contract_address"].values[0]):
st.markdown(
f'<h1>Contract Address {contract_address} <a href="https://etherscan.io/address/{contract_address}">Etherscan</a></h1>',
unsafe_allow_html=True,
)
##### Market Data
def get_market_data(coin_choice, df):
market_data_json = df.loc[df.name == coin_choice, "market_data"].values[0]
market_cap = market_data_json["market_cap"]
current_price = market_data_json["current_price"]
circulating_supply = market_data_json["circulating_supply"]
max_supply = market_data_json["max_supply"]
mc_change_percentage_24h = market_data_json[
"market_cap_change_percentage_24h_in_currency"
]
price_change_percentage_24h = market_data_json[
"price_change_percentage_24h_in_currency"
]
# text = f"#### Market Cap {market_cap['usd']}\n#### Total Supply {circulating_supply}\n#### Current Price {current_price['usd']}\n#### Price Change 24h {price_change_percentage_24h['usd']}\n"
# market_stats = {
# "market_cap": (f"${market_cap['usd']:,}", "💰"),
# "current_price": (f"${current_price['usd']:,}", "🤑"),
# "circulating_supply": (f"{circulating_supply:,}", "💩"),
# "price_change_percentage_24h": (
# f"{price_change_percentage_24h['usd']:.0}%",
# "%",
# ),
# }
st.metric(
label="Market Cap",
value=f"${millify(market_cap['usd'])} 💰",
# value=f"${market_cap['usd']:,}",
delta=f"MC Change 24h {mc_change_percentage_24h['usd']:.0}%",
delta_color="normal",
)
st.metric(
label="Current Price",
value=f"${current_price['usd']:,} 🤑",
delta=f"Price Change 24h {price_change_percentage_24h['usd']:.0}%",
delta_color="normal",
)
if max_supply:
st.metric(
label="Circulating Supply",
value=f"{millify(circulating_supply, precision = 3)} 💩",
delta=f"Max Supply {millify(max_supply, precision = 3)}",
delta_color="off",
)
# for stat in market_stats.items():
# st.markdown(
# f"<p class='small-font'>{stat[1][1]} <strong>{stat[0]}</strong>: {stat[1][0]}</p>", # noqa: E501
# unsafe_allow_html=True,
# )
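# Note: millify() just abbreviates big numbers for display, e.g. millify(1_234_567)
# gives something like "1M" (and "1.235M" with precision=3); the exact formatting
# depends on the installed millify version.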
####### SOCIALS
def get_community_data(coin_choice, df):
market_data_json = df.loc[df.name == coin_choice, "community_data"].values[0]
market_data_json = {k: v if v else 0 for (k, v) in market_data_json.items()}
resp = {
# "Facebook Likes": (f"{market_data_json['facebook_likes']:,}", "💬"),
"Twitter Followers": (f"{market_data_json['twitter_followers']:,}", "💬"),
"Reddit Average posts 48h": (
f"{market_data_json['reddit_average_posts_48h']:,}",
"💬",
),
"Reddit Average Comments 48h": (
f"{market_data_json['reddit_average_comments_48h']:,}",
"💬",
),
"Reddit Subscribers": (f"{market_data_json['reddit_subscribers']:,}", "💬"),
"Reddit Accounts Active 48h": (
f"{market_data_json['reddit_accounts_active_48h']:,}",
"💬",
),
"Telegram User Count": (
f"{market_data_json['telegram_channel_user_count']:,}",
"💬",
),
}
for stat in resp.items():
st.markdown(
f"<p class='small-font'>{stat[1][1]} <strong>{stat[0]}</strong>: {stat[1][0]}</p>", # noqa: E501
unsafe_allow_html=True,
)
def get_social_links_data(coin_choice, df):
"""Gets Social media links from coingecko df"""
links_json = df.loc[df.name == coin_choice, "links"].values[0]
homepage = links_json.get("homepage")[0]
twitter_screen_name = links_json.get("twitter_screen_name")
twitter_link = f"https://twitter.com/{twitter_screen_name}"
subreddit_url = links_json.get("subreddit_url")
gitlinks = links_json.get("repos_url").get("github", [""])
google = f"https://www.google.com/search?q={coin_choice}"
return {
"twitter": twitter_screen_name,
"github": gitlinks,
"reddit": subreddit_url,
"homepage": homepage,
"google": google,
}
def make_clickable(val):
return f'<a target="_blank" href="{val}">{val}</a>'
def get_repo_stats_aggregates(coin_choice, data):
repo_link_choice = data.loc[
data.name == coin_choice, "github_repos_complete"
].values[0]
import re
pd.set_option("display.max_colwidth", -1)
repo_link_choice = [re.sub(r"\.git", "", i) for i in repo_link_choice]
repo_paths = [f"'{str(urlparse(path).path)[1:]}'" for path in repo_link_choice]
repo_paths_dict = {
f"{str(urlparse(path).path)[1:]}": path for path in repo_link_choice
}
df = utils.get_coin_multiple_repos_stats(repo_paths)
df_agg = df.groupby(by=["repo_path"]).sum()
df_agg = df_agg.apply(lambda x: pd.to_numeric(x, downcast="integer"))
df_agg.sort_values(by="stargazer_size", ascending=False, inplace=True)
df_agg["url"] = df_agg.index.map(repo_paths_dict)
df_agg.reset_index(inplace=True)
# st.write(df_agg.index)
# st.write(repo_paths_dict.keys())
cell_hover = { # for row hover use <tr> instead of <td>
"selector": "td:hover",
"props": [("background-color", "#ffffb3")],
}
index_names = {
"selector": ".index_name",
"props": "font-style: monospace; color: darkgrey; font-weight:normal;",
}
headers = {
"selector": "th:not(.index_name)",
"props": "background-color: #000066; color: white;",
}
df_agg_styled = (
df_agg.style.format(
{
"stargazer_size": "{:,}",
"additions": lambda x: f"{millify(x)}",
"deletions": lambda x: f"{millify(x)}",
"total_commits": "{:,}",
"url": make_clickable,
}
)
# .background_gradient(
# axis=0,
# cmap="YlOrRd",
# subset=["additions", "deletions", "total_commits"],
# )
.bar(
subset=[
"stargazer_size",
],
color="#a69232",
)
.bar(subset=["additions"], color="#308a20")
.bar(subset=["deletions"], color="#bd352b")
.bar(subset=["total_commits"], color="#2fc3d6")
.set_table_styles([cell_hover, index_names, headers])
.set_properties(**{"background-color": "#a3d3d9", "font-family": "monospace"})
)
# st.dataframe(df_agg, height=600)
# st.markdown(df_agg.to_html(), unsafe_allow_html=True)
# st.experimental_show(df_agg)
components.html(df_agg_styled.to_html(), height=600, scrolling=True)
# Data download button
download_data = utils.convert_df(df_agg)
st.download_button(
label="Download data as CSV",
data=download_data,
file_name="repo_stats_aggregates.csv",
mime="text/csv",
)
# download_url = utils.convert_df(df_agg)
# st.markdown(download_url, unsafe_allow_html=True)
def get_repo_stats_history(coin_choice, data):
# links_dict = get_social_links_data(coin_choice, data)
# repo_link_choice = (
# links_dict["github"]
# if isinstance(links_dict["github"], list)
# else list(links_dict["github"])
# )
repo_link_choice = data.loc[
data.name == coin_choice, "github_repos_complete"
].values[0]
import re
repo_link_choice = [re.sub(r"\.git", "", i) for i in repo_link_choice]
repo_paths = [f"'{str(urlparse(path).path)[1:]}'" for path in repo_link_choice]
df = utils.get_coin_multiple_repos_stats(repo_paths[:100])
return df
def get_social_links_html(coin_choice, df):
"""Produces HTML for UI: Social media links from coingecko df"""
HtmlFile = open("./components/social_links.html", "r", encoding="utf-8")
source_code = HtmlFile.read()
links_dict = get_social_links_data(coin_choice, df)
github_html = "".join(
[f'<a href={link} class ="fa fa-github"></a>' for link in links_dict["github"]]
)
links_html = f'<body><a href="{links_dict["homepage"]}" class="fa fa-rss"></a><a href="{links_dict["twitter"]}" class="fa fa-twitter"></a><a href="{links_dict["google"]}" class="fa fa-google"></a><a href="{links_dict["reddit"]}" class="fa fa-reddit"></a>{github_html}</body></html>'
return "</html> " + source_code + links_html
def get_donate_button():
HtmlFile = open("./components/donate_eth.html", "r", encoding="utf-8")
source_code = HtmlFile.read()
return source_code
######## GITHUB
def get_git_bar(data, container):
with container:
st.write(plot_cum_commits(data))
contributors = data["author"].unique().tolist()
contributors.insert(0, None) # Manually add default
# Filters
contributor = st.selectbox("Select Contributor", contributors, index=0)
start = st.date_input("Start Date", value=min(data["committed_on"]))
end = st.date_input("End Date", value=max(data["committed_on"]))
# Data download button
if st.button("Download Data"):
download_url = utils.download_data(data)
st.markdown(download_url, unsafe_allow_html=True)
return start, end, contributor
def get_repo_source():
"""Gets repo path (remote or uploaded file) and displays relevant UI"""
input_type = st.sidebar.radio(
"Input type input (.json/repo link)", ("Local .json", "Repo Link")
)
if input_type == "Local .json":
repo_source = st.sidebar.file_uploader("Add your file here")
elif input_type == "Repo Link":
repo_source = st.sidebar.text_input("Add repo URL here", key="repo_url")
return repo_source
def plot_top_contributors(data):
"""Plots top n contributors in a vertical histogram"""
bars = (
alt.Chart(data[:30])
.mark_bar()
.encode(
x=alt.X("n_commits", title="N. Commits"),
y=alt.Y("author", sort="-x", title=""),
tooltip=[
alt.Tooltip("author", title="Author"),
alt.Tooltip("n_commits", title="N. Commits", format=",.0f"),
],
)
.properties(width=850, height=430, title="Top 30 Contributors")
)
text = bars.mark_text(align="left", baseline="middle", dx=3).encode(
text="n_commits:Q"
)
return bars + text
def plot_daily_contributions(data):
"""Plots daily commits in a bar chart"""
agg = (
data.groupby(pd.Grouper(key="committed_on", freq="1D"))["hash"]
.count()
.reset_index()
)
plot = (
alt.Chart(agg)
.mark_bar()
.encode(
x=alt.X("committed_on", title="Date"),
y=alt.Y("hash", title="Commits", axis=alt.Axis(grid=False)),
tooltip=[
alt.Tooltip("committed_on", title="Date"),
alt.Tooltip("hash", title="Commits"),
],
)
.properties(height=170, width=850, title="Daily Changes")
)
return plot
def plot_inserts_deletions(data):
"""Plots daily lines added/deleted in a bar chart"""
agg = data.copy()
agg["lines_deleted"] = -agg["lines_deleted"]
agg = (
agg.groupby(pd.Grouper(key="committed_on", freq="1D"))[
["lines_added", "lines_deleted"]
]
.sum()
.reset_index()
.melt(id_vars="committed_on")
)
plot = (
alt.Chart(agg)
.mark_bar()
.encode(
x=alt.X("committed_on", title="Date"),
y=alt.Y("value", title=""),
color=alt.condition(
alt.datum.value > 0, alt.value("green"), alt.value("red")
),
tooltip=[
alt.Tooltip("committed_on", title="Date"),
alt.Tooltip("value", title="Lines Changed", format=",.0f"),
alt.Tooltip("variable"),
],
)
).properties(height=170, width=850, title="Daily Lines Added/Removed")
return plot
def plot_cum_commits(data):
"""Plots cumulative commits for sidebar plot"""
added_commits_cumsum = (
data.groupby(
pd.Grouper(key="committed_on", freq="1D"))
import concurrent
import os
import re
import shutil
import xml.etree.ElementTree as ET # TODO do we have this as requirement?
from concurrent.futures import as_completed
from pathlib import Path
import ffmpeg
import pandas as pd
import webrtcvad
from audio_korpora_pipeline.baseobjects import FileHandlingObject
from audio_korpora_pipeline.inputadapter.audiosplit.splitter import Splitter
from audio_korpora_pipeline.metamodel.mediasession import MediaAnnotationBundle, \
MediaAnnotationBundleWithoutTranscription, WrittenResource, MediaFile, \
MediaSessionActor, Sex, \
MediaSessionActors, MediaSession
class Adapter(FileHandlingObject):
def __init__(self, config):
super(Adapter, self).__init__()
def toMetamodel(self) -> MediaSession:
raise NotImplementedError("Please use a subclass")
def skipAlreadyProcessedFiles(self):
skip = self.config['global']['skipAlreadyProcessedFiles']
if not (skip):
self.logger.warn("No config setting for skipAlreadyProcessedFiles set. Assuming True")
return True
return skip
class UntranscribedMediaSplittingAdapter(Adapter):
AUDIO_SPLIT_AGRESSIVENESS = 3 # webrtcvad 1 (low), 3 (max)
ADAPTERNAME = "MediaSplittingAdapter"
mediaAnnotationBundles = []
mediaSessionActors = set()  # using a set so we don't have duplicates
def __init__(self, config):
super(UntranscribedMediaSplittingAdapter, self).__init__(config=config)
self.config = config
self.mediaSessionActors.add(MediaSessionActor("UNKNOWN", Sex.UNKNOWN, None))
def _splitMonoRawAudioToVoiceSectionsThread(self, file, outputpath):
self.logger.debug("Splitting file into chunks: {}".format(self._getFilenameWithExtension(file)))
splitter = Splitter()
vad = webrtcvad.Vad(int(self.AUDIO_SPLIT_AGRESSIVENESS))
basename = self._getFilenameWithoutExtension(file)
audiochunkPathsForThisfile = []
try:
audio, sample_rate = splitter.read_wave(file)
frames = splitter.frame_generator(30, audio, sample_rate)
frames = list(frames)
segments = splitter.vad_collector(sample_rate, 30, 300, vad, frames)
for i, segment in enumerate(segments):
path = os.path.join(outputpath, basename + '_chunk_{:05d}.wav'.format(i))
self.logger.debug("Write chunk {} of file {}".format(i, file))
splitter.write_wave(path, segment, sample_rate)
audiochunkPathsForThisfile.append(path)
# write staging complete file
stagingPath = os.path.join(outputpath, basename + ".stagingComplete")
with open(stagingPath, 'a'):
os.utime(stagingPath, None)
self.logger.debug("Finished splitting file {}".format(file))
except Exception as excep:
self.logger.warn("Could split file into chunks {}. Skipping".format(file), exc_info=excep)
return (False, str(file), []) # returning an empty list, as no success here
return (True, str(file), audiochunkPathsForThisfile)
def _convertMediafileToMonoAudioThread(self, filenumber, totalNumberOfFiles, singleFilepathToProcess, outputPath):
self.logger.debug(
"Processing file {}/{} on path {}".format(filenumber + 1, totalNumberOfFiles, singleFilepathToProcess))
nextFilename = os.path.join(outputPath, self._getFilenameWithoutExtension(singleFilepathToProcess) + ".wav")
try:
(ffmpeg
.input(singleFilepathToProcess)
.output(nextFilename, format='wav', acodec='pcm_s16le', ac=1, ar='16k')
.overwrite_output()
.run()
)
except ffmpeg.Error as ffmpgError:
self.logger.warn("Ffmpeg rose an error", exc_info=ffmpgError)
self.logger.warn("Due to error of ffmpeg skipped file {}".format(singleFilepathToProcess))
return (False, str(singleFilepathToProcess), str(nextFilename))
except Exception as e:
self.logger.warn("Got an error while using ffmpeg for file {}".format(singleFilepathToProcess), exc_info=e)
return (False, str(singleFilepathToProcess), str(nextFilename))
return (True, str(singleFilepathToProcess), str(nextFilename))
def createMediaSession(self, bundles):
session = MediaSession(self.ADAPTERNAME, self.mediaSessionActors, bundles)
return session
def createMediaAnnotationBundles(self, audiochunks):
annotationBundles = []
for index, filepath in enumerate(audiochunks):
      bundle = MediaAnnotationBundleWithoutTranscription(identifier=filepath)  # we do not have any written resources
bundle.setMediaFile(filepath)
annotationBundles.append(bundle)
return annotationBundles
def splitAudioToChunks(self, filesToChunk, outputPath):
if ((filesToChunk == None) or (len(filesToChunk) == 0)):
self.logger.info("Nothing to split, received empty wav-filenamelist")
return []
successfullyChunkedFiles = []
with concurrent.futures.ThreadPoolExecutor(max_workers=None) as executor:
futures = []
for filenumber, file in enumerate(filesToChunk):
futures.append(
executor.submit(self._splitMonoRawAudioToVoiceSectionsThread, file, outputPath))
for future in as_completed(futures):
if (future.result()[0] == False):
self.logger.warning("Couldnt split audiofile {}, removing from list".format(future.result()[1]))
else:
successfullyChunkedFiles.extend(future.result()[2])
self.logger.debug("Splitting Audio is done {}".format(future.result()))
self.logger.debug("Finished splitting {} wav files".format(len(filesToChunk)))
return successfullyChunkedFiles
def determineWavFilesToChunk(self, baseFilesToChunk, stagingChunkPath):
allStageIndicatorFilesFullpath = set(self._getAllMediaFilesInBasepath(stagingChunkPath, {".stagingComplete"}))
allExistingChunkedFilesFullpath = set(self._getAllMediaFilesInBasepath(stagingChunkPath, {".wav"}))
allStageIndicatorFilesDictionary = self._toFilenameDictionary(allStageIndicatorFilesFullpath)
allBaseFilesDictionary = self._toFilenameDictionary(baseFilesToChunk)
stagingCompleteCorrectKeys = set(allBaseFilesDictionary.keys()).intersection(
set(allStageIndicatorFilesDictionary.keys()))
stagingIncompleteCorrectKeys = set(allBaseFilesDictionary.keys()).difference(
set(allStageIndicatorFilesDictionary.keys()))
stagingComplete = []
for fullpath in allExistingChunkedFilesFullpath:
if any(self._getFilenameWithoutExtension(fullpath).startswith(cm) for cm in stagingCompleteCorrectKeys):
stagingComplete.append(fullpath)
stagingIncomplete = [allBaseFilesDictionary[key] for key in stagingIncompleteCorrectKeys]
self.logger.debug("Got {} files not yet chunked".format(len(stagingIncomplete)))
self.logger.debug("Got {} files chunked".format(len(stagingComplete)))
return stagingIncomplete, stagingComplete
def convertMediaFilesToMonoAudio(self, filesToProcess, outputpath, adapterName):
if (filesToProcess == None or len(filesToProcess) == 0):
self.logger.debug("No files to convert for {}, skipping".format(adapterName))
return []
successfulFilenames = []
with concurrent.futures.ThreadPoolExecutor(max_workers=None) as executor:
futures = []
for filenumber, currentFile in enumerate(filesToProcess):
futures.append(
executor.submit(self._convertMediafileToMonoAudioThread, filenumber, len(filesToProcess),
currentFile, outputpath))
for future in as_completed(futures):
if (future.result()[0] == False):
self.logger.warning("Couldnt process audiofile {}, removing from list".format(future.result()[1]))
else:
successfulFilenames.append(future.result()[2])
self.logger.debug("Processing Audio is done {} for Converter {}".format(future.result(), adapterName))
return successfulFilenames
def _toFilenameDictionary(self, list):
if (list == None or len(list) == 0):
self.logger.debug("Got nothing in list, returning empty dictionary")
return dict()
listDict = dict()
for fullpath in list:
listDict[self._getFilenameWithoutExtension(fullpath)] = fullpath
self.logger.debug("Created dictionary of files of length {}".format(len(listDict)))
return listDict
def determineFilesToConvertToMonoFromGivenLists(self, alreadyStagedFiles, originalFiles, adaptername):
dictionaryOfOriginalFilepaths = self._toFilenameDictionary(originalFiles)
dictionaryOfStagedFilepaths = self._toFilenameDictionary(alreadyStagedFiles)
notYetProcessedKeys = set(dictionaryOfOriginalFilepaths.keys()).difference(set(dictionaryOfStagedFilepaths.keys()))
alreadyProcessedKeys = set(dictionaryOfOriginalFilepaths.keys()).intersection(
set(dictionaryOfStagedFilepaths.keys()))
fullpathsToNotYetProcessed = [dictionaryOfOriginalFilepaths[key] for key in notYetProcessedKeys]
fullpathsProcessed = [dictionaryOfStagedFilepaths[key] for key in alreadyProcessedKeys]
self.logger.debug("Got {} files not yet processed for corpus {}".format(len(notYetProcessedKeys), adaptername))
self.logger.debug("Got {} files already processed for corpus {}".format(len(alreadyProcessedKeys), adaptername))
return fullpathsToNotYetProcessed, fullpathsProcessed
def _preprocess_workflow_with_splitting(self, filesAlreadyProcessed, filesToProcess, monoPath, chunkPath,
adaptername):
filesSuccessfullyProcessed = self.convertMediaFilesToMonoAudio(filesToProcess, monoPath, adaptername)
baseFilesToChunk = []
baseFilesToChunk = baseFilesToChunk + filesSuccessfullyProcessed + filesAlreadyProcessed
# split mono audio to chunks
filesToChunk, filesAlreadyChunked = self.determineWavFilesToChunk(baseFilesToChunk,
chunkPath)
filesSuccessfullyChunked = self.splitAudioToChunks(filesToChunk, chunkPath)
# add chunks to media session
mediaBundleFiles = [] + filesSuccessfullyChunked + filesAlreadyChunked
mediaAnnotationbundles = self.createMediaAnnotationBundles(mediaBundleFiles)
mediaSession = self.createMediaSession(mediaAnnotationbundles)
return mediaSession
class UntranscribedVideoAdapter(UntranscribedMediaSplittingAdapter):
ADAPTERNAME = "UntranscribedVideoAdapter"
def __init__(self, config):
super(UntranscribedVideoAdapter, self).__init__(config=config)
self.config = config
def toMetamodel(self):
self.logger.debug("Untranscribed Video Korpus")
# convert video to mono audio
filesToProcess, filesAlreadyProcessed = self._determineVideoFilesToConvertToMono()
return self._preprocess_workflow_with_splitting(filesAlreadyProcessed, filesToProcess,
self._validateStagingMonoPath(), self._validateStagingChunksPath(),
self.ADAPTERNAME)
def _validateKorpusPath(self):
korpus_path = self.config['untranscribed_videos_input_adapter']['korpus_path']
if not os.path.isdir(korpus_path):
raise IOError("Could not read korpus path" + korpus_path)
return korpus_path
def _validateStagingMonoPath(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("untranscribed_video_staging_mono")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _validateStagingChunksPath(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("untranscribed_video_staging_chunks")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _determineVideoFilesToConvertToMono(self):
originalFiles = set(self._getAllMediaFilesInBasepath(self._validateKorpusPath(), {".mp4"}))
alreadyStagedFiles = set(self._getAllMediaFilesInBasepath(self._validateStagingMonoPath(), {".wav"}))
self.logger.debug("Got {} original untranscribed mp4 files to process".format(len(originalFiles)))
return self.determineFilesToConvertToMonoFromGivenLists(alreadyStagedFiles, originalFiles, self.ADAPTERNAME)
class ChJugendspracheAdapter(UntranscribedMediaSplittingAdapter):
ADAPTERNAME = "CHJugendspracheAdapter"
def __init__(self, config):
super(ChJugendspracheAdapter, self).__init__(config=config)
self.config = config
def toMetamodel(self):
self.logger.debug("CH-Jugendsprache Korpus")
# convert audio to mono audio
filesToProcess, filesAlreadyProcessed = self._determineChJugendspracheFilesToConvertToMono()
return self._preprocess_workflow_with_splitting(filesAlreadyProcessed, filesToProcess,
self._validateStagingMonoPath(), self._validateStagingChunksPath(),
self.ADAPTERNAME)
def _determineChJugendspracheFilesToConvertToMono(self):
originalFiles = set(self._getAllMediaFilesInBasepath(self._validateKorpusPath(), {".WAV", ".wav"}))
alreadyStagedFiles = set(self._getAllMediaFilesInBasepath(self._validateStagingMonoPath(), {".wav"}))
self.logger.debug("Got {} original jugendsprache files to process".format(len(originalFiles)))
return self.determineFilesToConvertToMonoFromGivenLists(alreadyStagedFiles, originalFiles, self.ADAPTERNAME)
def _validateStagingMonoPath(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("ch_jugensprache_staging_mono")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _validateStagingChunksPath(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("ch_jugensprache_staging_chunks")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _validateKorpusPath(self):
korpus_path = self.config['ch_jugendsprache_input_adapter']['korpus_path']
if not os.path.isdir(korpus_path):
raise IOError("Could not read korpus path" + korpus_path)
return korpus_path
class ArchimobAdapter(UntranscribedMediaSplittingAdapter):
"""
ArchimobAdapter
"""
ADAPTERNAME = "Archimob"
def __init__(self, config):
super(ArchimobAdapter, self).__init__(config=config)
self.config = config
def _validateKorpusPath(self):
korpus_path = self.config['archimob_input_adapter']['korpus_path']
if not os.path.isdir(korpus_path):
raise IOError("Could not read korpus path" + korpus_path)
return korpus_path
def _transcription_pause_tag_symbol(self):
    symbol = self.config['archimob_input_adapter'].get('transcription_pause_tag_symbol')  # .get() so a missing key also triggers the fallback below
if not symbol:
self.logger.warn("No symbol for transcription pause tag configured, falling back to default, which is '@'-Symbol")
symbol = '@'
return symbol
def _transcription_vocal_tag_symbol(self):
    symbol = self.config['archimob_input_adapter'].get('transcription_vocal_tag_symbol')
if not symbol:
self.logger.warn("No symbol for transcription pause tag configured, falling back to default, which is '#'-Symbol")
symbol = '#'
return symbol
def _validateWorkdir(self):
workdir = self.config['global']['workdir']
if not os.path.isdir(workdir):
raise IOError("Could not read workdir path" + workdir)
workdir = Path(workdir).joinpath("archimob_staging")
workdir.mkdir(parents=True, exist_ok=True)
return str(workdir)
def _determineArchimobFilesToProcess(self):
originalFiles = set(self._getAllMediaFilesInBasepath(self._validateKorpusPath(), {".wav"}))
originalFiles = self._fixOriginalDatasetFlawsIfNecessary(originalFiles)
alreadyStagedFiles = set(self._getAllMediaFilesInBasepath(self._validateWorkdir(), {".wav"}))
self.logger.debug("Got {} original archimob files to process".format(len(originalFiles)))
return self.determineFilesToConvertToMonoFromGivenLists(alreadyStagedFiles, originalFiles, self.ADAPTERNAME)
def toMetamodel(self):
self.logger.debug("Archimob V2 Korpus")
# convert chunks to mono audio
filesToProcess, filesAlreadyProcessed = self._determineArchimobFilesToProcess()
filesSuccessfullyProcessed = self.convertMediaFilesToMonoAudio(filesToProcess, self._validateWorkdir(),
self.ADAPTERNAME)
filesForMediaBundle = []
filesForMediaBundle = filesForMediaBundle + filesSuccessfullyProcessed + filesAlreadyProcessed
# add chunks to media session
mediaAnnotationbundles = self.createMediaAnnotationBundles(filesForMediaBundle)
mediaSession = self.createMediaSession(mediaAnnotationbundles)
return mediaSession
def createMediaSession(self, bundles):
actors = self._createMediaSessionActorsFromBundles(bundles)
session = MediaSession(self.ADAPTERNAME, actors, bundles)
return session
def createMediaAnnotationBundles(self, filesForMediaBundle):
allXmlOriginalTranscriptionFiles = self._archimobOriginalTranscriptionFiles(self._validateKorpusPath())
transcriptionsPerSpeaker = self._extract(allXmlOriginalTranscriptionFiles)
mediaFilesAndTranscription = self._onlyTranscriptionsWithMediaFilesAndViceVersa(transcriptionsPerSpeaker,
filesForMediaBundle)
mediaAnnotationBundles = self._createActualMediaAnnotationBundles(mediaFilesAndTranscription)
return mediaAnnotationBundles
def _fixOriginalDatasetFlawsIfNecessary(self, originalFiles):
    # As of Archimob release V2 there are some minor flaws in the data, which are treated sequentially
if (self._fixForDuplicateWavs1063Necessary(originalFiles)):
originalFiles = self._fixForDuplicateWavs1063(originalFiles)
if (self._fixForWrongFilenames1082Necessary(originalFiles)):
originalFiles = self._fixForWrongFilenames1082(originalFiles)
return originalFiles
def _fixForDuplicateWavs1063Necessary(self, originalFiles):
    # This flaw is simply that within 1063 there exists another 1063 folder containing all files again
existingPathsForDoubled1063 = list(
filter(lambda file: os.path.sep + "1063" + os.path.sep + "1063" + os.path.sep in file, originalFiles))
fixNecessary = len(existingPathsForDoubled1063) > 0
self.logger.info("Found {} files of speaker 1063 which are duplicates. They will be ignored".format(
len(existingPathsForDoubled1063)))
return fixNecessary
def _fixForDuplicateWavs1063(self, originalFiles):
    # the fix simply removes the files in question from the list
pathsWithout1063duplicates = list(
filter(lambda file: not (os.path.sep + "1063" + os.path.sep + "1063" + os.path.sep in file), originalFiles))
originalFiles = pathsWithout1063duplicates
return originalFiles
def _fixForWrongFilenames1082Necessary(self, originalFiles):
    regexForFindingWrongNames = r"(^\d{4}_\d)(d\d{4}_.*\.wav)"  # like 1082_2d1082_2_TLI_3.wav
onlyFilenames = [os.path.basename(filename) for filename in originalFiles]
for filename in onlyFilenames:
m = re.search(regexForFindingWrongNames, filename)
if (not (m is None)):
return True
return False
def _fixForWrongFilenames1082(self, originalFiles):
fixedFiles = originalFiles.copy()
regexForFindingWrongFullpaths = "(.*\\" + os.path.sep + ")(\d{4}_\d)(d\d{4}_.*\.wav)" # like /home/somebody/files/1082/1082_2d1082_2_TLI_3.wav
for filename in originalFiles:
m = re.search(regexForFindingWrongFullpaths, filename)
if (not (m is None)):
newFilename = m.group(1) + m.group(3)
self.logger.debug(
"Fix 1082: Renaming file {} from {} to {}".format(m.group(2) + m.group(3), filename, newFilename))
try:
shutil.move(filename, newFilename)
fixedFiles.append(newFilename)
except Exception as inst:
self.logger.warn(
"Could not move file {} to {}, skipping and just removing from usable filenames".format(filename,
newFilename),
exc_info=inst)
fixedFiles.remove(filename)
return fixedFiles
def _archimobOriginalTranscriptionFiles(self, path):
xmlOriginalFiles = list(Path(path).glob("**/*.xml"))
self.logger.debug("Found {} original xml files for archimob".format(len(xmlOriginalFiles)))
return xmlOriginalFiles
def _extract(self, allXmlOriginalTranscriptionFiles):
transcriptionsPerSpeaker = []
with concurrent.futures.ThreadPoolExecutor(max_workers=None) as executor:
futures = []
for filenumber, file in enumerate(allXmlOriginalTranscriptionFiles):
futures.append(executor.submit(self._extractSingleXmlFileThread, file))
for future in as_completed(futures):
if (future.result()[0] == False):
self.logger.warning("Couldnt extract metadata for file {}, removing from list".format(future.result()[1]))
else:
transcriptionsPerSpeaker.append(
(future.result()[1], future.result()[2])) # tuple of original file and transcription dataframe
self.logger.debug("Extracting metadata for speaker finished {}".format(future.result()))
self.logger.debug("Finished metadata extraction for all {} xml files".format(len(allXmlOriginalTranscriptionFiles)))
return transcriptionsPerSpeaker
def _extractSingleXmlFileThread(self, xmlFile):
namespaceprefix = "{http://www.tei-c.org/ns/1.0}"
try:
tree = ET.parse(xmlFile)
root = tree.getroot()
ch_datacolumns = pd.DataFrame(columns=['Filename', 'transcript'])
transcriptionForSpeaker =
|
pd.DataFrame(columns=ch_datacolumns.columns)
|
pandas.DataFrame
|
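# Illustrative sketch of the pd.DataFrame completion above: the Archimob adapter
# first builds an empty frame with fixed columns and then fills one row per
# utterance while walking the TEI XML. The filenames and transcripts below are
# assumptions made up for the example.
import pandas as pd
ch_datacolumns = pd.DataFrame(columns=['Filename', 'transcript'])
transcriptionForSpeaker = pd.DataFrame(columns=ch_datacolumns.columns)
for filename, text in [("1007_1_TLI_1.wav", "grüezi mitenand"),
                       ("1007_1_TLI_2.wav", "wie gaht es dir")]:
    transcriptionForSpeaker.loc[len(transcriptionForSpeaker)] = [filename, text]
print(transcriptionForSpeaker)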
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
# index columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
result = read_json(
obj_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
if not dtype: # TODO: Special case for object data; maybe a bug?
assert result.iloc[0, 2] is None
else:
assert np.isnan(result.iloc[0, 2])
@pytest.mark.parametrize("inf", [np.inf, np.NINF])
@pytest.mark.parametrize("dtype", [True, False])
def test_frame_infinity(self, orient, inf, dtype):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = inf
result = read_json(df.to_json(), dtype=dtype)
assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
@pytest.mark.parametrize(
"value,precision,expected_val",
[
(0.95, 1, 1.0),
(1.95, 1, 2.0),
(-1.95, 1, -2.0),
(0.995, 2, 1.0),
(0.9995, 3, 1.0),
(0.99999999999999944, 15, 1.0),
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
tm.assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
tm.assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
tm.assert_frame_equal(left, right)
def test_v12_compat(self, datapath):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
dirpath = datapath("io", "json", "data")
v12_json = os.path.join(dirpath, "tsframe_v012.json")
df_unser = pd.read_json(v12_json)
tm.assert_frame_equal(df, df_unser)
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
df_unser_iso = pd.read_json(v12_iso_json)
tm.assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range("20000101", periods=10, freq="H")
df_mixed = DataFrame(
OrderedDict(
float_1=[
-0.92077639,
0.77434435,
1.25234727,
0.61485564,
-0.60316077,
0.24653374,
0.28668979,
-2.51969012,
0.95748401,
-1.02970536,
],
int_1=[
19680418,
75337055,
99973684,
65103179,
79373900,
40314334,
21290235,
4991321,
41903419,
16008365,
],
str_1=[
"78c608f1",
"64a99743",
"13d2ff52",
"ca7f4af2",
"97236474",
"bde7e214",
"1a6bde47",
"b1190be5",
"7a669144",
"8d64d068",
],
float_2=[
-0.0428278,
-1.80872357,
3.36042349,
-0.7573685,
-0.48217572,
0.86229683,
1.08935819,
0.93898739,
-0.03030452,
1.43366348,
],
str_2=[
"14f04af9",
"d085da90",
"4bcfac83",
"81504caf",
"2ffef4a9",
"08e2f5c4",
"07e1af03",
"addbd4a7",
"1f6a09ba",
"4bfc4d87",
],
int_2=[
86967717,
98098830,
51927505,
20372254,
12601730,
20884027,
34193846,
10561746,
24867120,
76131025,
],
),
index=index,
)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype("unicode")
df_roundtrip = pd.read_json(df_mixed.to_json(orient="split"), orient="split")
tm.assert_frame_equal(
df_mixed,
df_roundtrip,
check_index_type=True,
check_column_type=True,
by_blocks=True,
check_exact=True,
)
def test_frame_nonprintable_bytes(self):
# GH14256: failing column caused segfaults, if it is not the last one
class BinaryThing:
def __init__(self, hexed):
self.hexed = hexed
self.binary = bytes.fromhex(hexed)
def __str__(self) -> str:
return self.hexed
hexed = "574b4454ba8c5eb4f98a8f45"
binthing = BinaryThing(hexed)
# verify the proper conversion of printable content
df_printable = DataFrame({"A": [binthing.hexed]})
assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({"A": [binthing]})
msg = "Unsupported UTF-8 sequence length when encoding string"
with pytest.raises(OverflowError, match=msg):
df_nonprintable.to_json()
# the same with multiple columns threw segfaults
df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
with pytest.raises(OverflowError):
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
result = df_nonprintable.to_json(default_handler=str)
expected = f'{{"A":{{"0":"{hexed}"}}}}'
assert result == expected
assert (
df_mixed.to_json(default_handler=str)
== f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
result = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
assert result == expected
def test_series_non_unique_index(self):
s = Series(["a", "b"], index=[1, 1])
msg = "Series index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="index")
tm.assert_series_equal(
s, read_json(s.to_json(orient="split"), orient="split", typ="series")
)
unser = read_json(s.to_json(orient="records"), orient="records", typ="series")
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_default_orient(self, string_series):
assert string_series.to_json() == string_series.to_json(orient="index")
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_simple(self, orient, numpy, string_series):
data = string_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = string_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [False, None])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_object(self, orient, numpy, dtype, object_series):
data = object_series.to_json(orient=orient)
result = pd.read_json(
data, typ="series", orient=orient, numpy=numpy, dtype=dtype
)
expected = object_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_empty(self, orient, numpy, empty_series):
data = empty_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = empty_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
else:
expected.index = expected.index.astype(float)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_timeseries(self, orient, numpy, datetime_series):
data = datetime_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = datetime_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.float64, np.int])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_numeric(self, orient, numpy, dtype):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
data = s.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = s.copy()
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
tm.assert_series_equal(result, expected)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", precise_float=True)
tm.assert_series_equal(result, s, check_index_type=False)
def test_series_with_dtype(self):
# GH 21986
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", dtype=np.int64)
expected = Series([4] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype,expected",
[
(True, Series(["2000-01-01"], dtype="datetime64[ns]")),
(False, Series([946684800000])),
],
)
def test_series_with_dtype_datetime(self, dtype, expected):
s = Series(["2000-01-01"], dtype="datetime64[ns]")
data = s.to_json()
result = pd.read_json(data, typ="series", dtype=dtype)
tm.assert_series_equal(result, expected)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
tm.assert_frame_equal(
result, df, check_index_type=False, check_column_type=False
)
def test_typ(self):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
result = read_json(s.to_json(), typ=None)
tm.assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
def test_path(self, float_frame):
with tm.ensure_clean("test.json") as path:
for df in [
float_frame,
self.intframe,
self.tsframe,
self.mixed_frame,
]:
df.to_json(path)
read_json(path)
def test_axis_dates(self, datetime_series):
# frame
json = self.tsframe.to_json()
result = read_json(json)
tm.assert_frame_equal(result, self.tsframe)
# series
json = datetime_series.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, datetime_series, check_names=False)
assert result.name is None
def test_convert_dates(self, datetime_series):
# frame
df = self.tsframe.copy()
df["date"] = Timestamp("20130101")
json = df.to_json()
result = read_json(json)
tm.assert_frame_equal(result, df)
df["foo"] = 1.0
json = df.to_json(date_unit="ns")
result = read_json(json, convert_dates=False)
expected = df.copy()
expected["date"] = expected["date"].values.view("i8")
expected["foo"] = expected["foo"].astype("int64")
tm.assert_frame_equal(result, expected)
# series
ts = Series(Timestamp("20130101"), index=datetime_series.index)
json = ts.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, ts)
@pytest.mark.parametrize("date_format", ["epoch", "iso"])
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize(
"date_typ", [datetime.date, datetime.datetime, pd.Timestamp]
)
def test_date_index_and_values(self, date_format, as_object, date_typ):
data = [date_typ(year=2020, month=1, day=1), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
result = ser.to_json(date_format=date_format)
if date_format == "epoch":
expected = '{"1577836800000":1577836800000,"null":null}'
else:
expected = (
'{"2020-01-01T00:00:00.000Z":"2020-01-01T00:00:00.000Z","null":null}'
)
if as_object:
expected = expected.replace("}", ',"a":"a"}')
assert result == expected
@pytest.mark.parametrize(
"infer_word",
[
"trade_time",
"date",
"datetime",
"sold_at",
"modified",
"timestamp",
"timestamps",
],
)
def test_convert_dates_infer(self, infer_word):
# GH10747
from pandas.io.json import dumps
data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
expected = DataFrame(
[[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
)
result = read_json(dumps(data))[["id", infer_word]]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_frame(self, date, date_unit):
df = self.tsframe.copy()
df["date"] = Timestamp(date)
df.iloc[1, df.columns.get_loc("date")] = pd.NaT
df.iloc[5, df.columns.get_loc("date")] = pd.NaT
if date_unit:
json = df.to_json(date_format="iso", date_unit=date_unit)
else:
json = df.to_json(date_format="iso")
result = read_json(json)
expected = df.copy()
expected.index = expected.index.tz_localize("UTC")
expected["date"] = expected["date"].dt.tz_localize("UTC")
tm.assert_frame_equal(result, expected)
def test_date_format_frame_raises(self):
df = self.tsframe.copy()
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_series(self, date, date_unit, datetime_series):
ts = Series(Timestamp(date), index=datetime_series.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format="iso", date_unit=date_unit)
else:
json = ts.to_json(date_format="iso")
result = read_json(json, typ="series")
expected = ts.copy()
expected.index = expected.index.tz_localize("UTC")
expected = expected.dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_date_format_series_raises(self, datetime_series):
ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ts.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
def test_date_unit(self, unit):
df = self.tsframe.copy()
df["date"] = Timestamp("20130101 20:43:42")
dl = df.columns.get_loc("date")
df.iloc[1, dl] = Timestamp("19710101 20:43:42")
df.iloc[2, dl] = Timestamp("21460101 20:43:42")
df.iloc[4, dl] = pd.NaT
json = df.to_json(date_format="epoch", date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
tm.assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
tm.assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r"""{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}"""
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB"))
dfj2["date"] = Timestamp("20130101")
dfj2["ints"] = range(5)
dfj2["bools"] = True
dfj2.index = pd.date_range("20130101", periods=5)
json = dfj2.to_json()
result =
|
read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
|
pandas.read_json
|
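# Minimal round-trip sketch of the pd.read_json completion above: serialise a
# small frame with to_json() and read it back, pinning the "ints" and "bools"
# dtypes explicitly as the doc-example test does. Passing a literal JSON string
# matches the pandas version these tests target.
import numpy as np
import pandas as pd
df = pd.DataFrame({"ints": [1, 2, 3], "bools": [True, False, True]})
result = pd.read_json(df.to_json(), dtype={"ints": np.int64, "bools": np.bool_})
pd.testing.assert_frame_equal(result, df)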
import pytest
import d6tflow
import sklearn, sklearn.datasets, sklearn.svm, sklearn.linear_model
import pandas as pd
import numpy as np
# define workflow
class TaskGetData(d6tflow.tasks.TaskPqPandas): # save dataframe as parquet
def run(self):
ds = sklearn.datasets.load_breast_cancer()
df_train =
|
pd.DataFrame(ds.data, columns=ds.feature_names)
|
pandas.DataFrame
|
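# Sketch of the pd.DataFrame completion above as used in TaskGetData.run: the
# scikit-learn breast-cancer arrays are wrapped in a DataFrame so the d6tflow
# task can persist them as parquet. The extra "y" label column is an assumption
# for illustration, not part of the original completion.
import pandas as pd
import sklearn.datasets
ds = sklearn.datasets.load_breast_cancer()
df_train = pd.DataFrame(ds.data, columns=ds.feature_names)
df_train["y"] = ds.target
print(df_train.shape)  # (569, 31): 30 feature columns plus the assumed label column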
from datetime import datetime
from decimal import Decimal
import numpy as np
import pytest
import pytz
from pandas.compat import is_platform_little_endian
from pandas import CategoricalIndex, DataFrame, Index, Interval, RangeIndex, Series
import pandas._testing as tm
class TestFromRecords:
def test_from_records_with_datetimes(self):
# this may fail on certain platforms because of a numpy issue
# related GH#6140
if not is_platform_little_endian():
pytest.skip("known failure of test on non-little endian")
# construction with a null in a recarray
# GH#6140
expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]})
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[ns]")]
try:
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
except (ValueError):
pytest.skip("known failure of numpy rec array creation")
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
# coercion should work too
arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
dtypes = [("EXPIRY", "<M8[m]")]
recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)
result = DataFrame.from_records(recarray)
tm.assert_frame_equal(result, expected)
def test_from_records_sequencelike(self):
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# this is actually tricky to create the recordlike arrays and
# have the dtypes be intact
blocks = df._to_dict_of_blocks()
tuples = []
columns = []
dtypes = []
for dtype, b in blocks.items():
columns.extend(b.columns)
dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])
for i in range(len(df.index)):
tup = []
for _, b in blocks.items():
tup.extend(b.iloc[i].values)
tuples.append(tuple(tup))
recarray = np.array(tuples, dtype=dtypes).view(np.recarray)
recarray2 = df.to_records()
lists = [list(x) for x in tuples]
# tuples (lose the dtype info)
result = DataFrame.from_records(tuples, columns=columns).reindex(
columns=df.columns
)
# created recarray and with to_records recarray (have dtype info)
result2 = DataFrame.from_records(recarray, columns=columns).reindex(
columns=df.columns
)
result3 = DataFrame.from_records(recarray2, columns=columns).reindex(
columns=df.columns
)
        # list of tuples (no dtype info)
result4 = DataFrame.from_records(lists, columns=columns).reindex(
columns=df.columns
)
tm.assert_frame_equal(result, df, check_dtype=False)
tm.assert_frame_equal(result2, df)
tm.assert_frame_equal(result3, df)
tm.assert_frame_equal(result4, df, check_dtype=False)
# tuples is in the order of the columns
result = DataFrame.from_records(tuples)
tm.assert_index_equal(result.columns, RangeIndex(8))
# test exclude parameter & we are casting the results here (as we don't
# have dtype info to recover)
columns_to_test = [columns.index("C"), columns.index("E1")]
exclude = list(set(range(8)) - set(columns_to_test))
result = DataFrame.from_records(tuples, exclude=exclude)
result.columns = [columns[i] for i in sorted(columns_to_test)]
tm.assert_series_equal(result["C"], df["C"])
tm.assert_series_equal(result["E1"], df["E1"].astype("float64"))
# empty case
result = DataFrame.from_records([], columns=["foo", "bar", "baz"])
assert len(result) == 0
tm.assert_index_equal(result.columns, Index(["foo", "bar", "baz"]))
result = DataFrame.from_records([])
assert len(result) == 0
assert len(result.columns) == 0
def test_from_records_dictlike(self):
# test the dict methods
df = DataFrame(
{
"A": np.array(np.random.randn(6), dtype=np.float64),
"A1": np.array(np.random.randn(6), dtype=np.float64),
"B": np.array(np.arange(6), dtype=np.int64),
"C": ["foo"] * 6,
"D": np.array([True, False] * 3, dtype=bool),
"E": np.array(np.random.randn(6), dtype=np.float32),
"E1": np.array(np.random.randn(6), dtype=np.float32),
"F": np.array(np.arange(6), dtype=np.int32),
}
)
# columns is in a different order here than the actual items iterated
# from the dict
blocks = df._to_dict_of_blocks()
columns = []
for dtype, b in blocks.items():
columns.extend(b.columns)
asdict = {x: y for x, y in df.items()}
asdict2 = {x: y.values for x, y in df.items()}
# dict of series & dict of ndarrays (have dtype info)
results = []
results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
results.append(
DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)
)
results.append(
DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)
)
for r in results:
tm.assert_frame_equal(r, df)
def test_from_records_with_index_data(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
data = np.random.randn(10)
df1 = DataFrame.from_records(df, index=data)
tm.assert_index_equal(df1.index, Index(data))
def test_from_records_bad_index_column(self):
df = DataFrame(np.random.randn(10, 3), columns=["A", "B", "C"])
# should pass
df1 = DataFrame.from_records(df, index=["C"])
tm.assert_index_equal(df1.index, Index(df.C))
df1 = DataFrame.from_records(df, index="C")
tm.assert_index_equal(df1.index, Index(df.C))
# should fail
msg = r"Shape of passed values is \(10, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(df, index=[2])
with pytest.raises(KeyError, match=r"^2$"):
DataFrame.from_records(df, index=2)
def test_from_records_non_tuple(self):
class Record:
def __init__(self, *args):
self.args = args
def __getitem__(self, i):
return self.args[i]
def __iter__(self):
return iter(self.args)
recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
tups = [tuple(rec) for rec in recs]
result = DataFrame.from_records(recs)
expected = DataFrame.from_records(tups)
tm.assert_frame_equal(result, expected)
def test_from_records_len0_with_columns(self):
# GH#2633
result = DataFrame.from_records([], index="foo", columns=["foo", "bar"])
expected = Index(["bar"])
assert len(result) == 0
assert result.index.name == "foo"
tm.assert_index_equal(result.columns, expected)
def test_from_records_series_list_dict(self):
# GH#27358
expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T
data = Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]])
result = DataFrame.from_records(data)
tm.assert_frame_equal(result, expected)
def test_from_records_series_categorical_index(self):
# GH#32805
index = CategoricalIndex(
[Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]
)
series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)
frame = DataFrame.from_records(series_of_dicts, index=index)
expected = DataFrame(
{"a": [1, 2, np.NaN], "b": [np.NaN, np.NaN, 3]}, index=index
)
tm.assert_frame_equal(frame, expected)
def test_frame_from_records_utc(self):
rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)}
# it works
DataFrame.from_records([rec], index="begin_time")
def test_from_records_to_records(self):
# from numpy documentation
arr = np.zeros((2,), dtype=("i4,f4,a10"))
arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
# TODO(wesm): unused
frame = DataFrame.from_records(arr) # noqa
index = Index(np.arange(len(arr))[::-1])
indexed_frame = DataFrame.from_records(arr, index=index)
tm.assert_index_equal(indexed_frame.index, index)
# without names, it should go to last ditch
arr2 = np.zeros((2, 3))
tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))
# wrong length
msg = r"Shape of passed values is \(2, 3\), indices imply \(1, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame.from_records(arr, index=index[:-1])
indexed_frame = DataFrame.from_records(arr, index="f1")
# what to do?
records = indexed_frame.to_records()
assert len(records.dtype.names) == 3
records = indexed_frame.to_records(index=False)
assert len(records.dtype.names) == 2
assert "index" not in records.dtype.names
def test_from_records_nones(self):
tuples = [(1, 2, None, 3), (1, 2, None, 3), (None, 2, 5, 3)]
df = DataFrame.from_records(tuples, columns=["a", "b", "c", "d"])
assert np.isnan(df["c"][0])
def test_from_records_iterator(self):
arr = np.array(
[(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5.0, 5.0, 6, 6), (7.0, 7.0, 8, 8)],
dtype=[
("x", np.float64),
("u", np.float32),
("y", np.int64),
("z", np.int32),
],
)
df = DataFrame.from_records(iter(arr), nrows=2)
xp = DataFrame(
{
"x": np.array([1.0, 3.0], dtype=np.float64),
"u": np.array([1.0, 3.0], dtype=np.float32),
"y": np.array([2, 4], dtype=np.int64),
"z": np.array([2, 4], dtype=np.int32),
}
)
tm.assert_frame_equal(df.reindex_like(xp), xp)
# no dtypes specified here, so just compare with the default
arr = [(1.0, 2), (3.0, 4), (5.0, 6), (7.0, 8)]
df = DataFrame.from_records(iter(arr), columns=["x", "y"], nrows=2)
tm.assert_frame_equal(df, xp.reindex(columns=["x", "y"]), check_dtype=False)
def test_from_records_tuples_generator(self):
def tuple_generator(length):
for i in range(length):
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
yield (i, letters[i % len(letters)], i / length)
columns_names = ["Integer", "String", "Float"]
columns = [
[i[j] for i in tuple_generator(10)] for j in range(len(columns_names))
]
data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = tuple_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_lists_generator(self):
def list_generator(length):
for i in range(length):
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
yield [i, letters[i % len(letters)], i / length]
columns_names = ["Integer", "String", "Float"]
columns = [
[i[j] for i in list_generator(10)] for j in range(len(columns_names))
]
data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
expected = DataFrame(data, columns=columns_names)
generator = list_generator(10)
result = DataFrame.from_records(generator, columns=columns_names)
tm.assert_frame_equal(result, expected)
def test_from_records_columns_not_modified(self):
tuples = [(1, 2, 3), (1, 2, 3), (2, 5, 3)]
columns = ["a", "b", "c"]
original_columns = list(columns)
df = DataFrame.from_records(tuples, columns=columns, index="a") # noqa
assert columns == original_columns
def test_from_records_decimal(self):
tuples = [(Decimal("1.5"),), (Decimal("2.5"),), (None,)]
df = DataFrame.from_records(tuples, columns=["a"])
assert df["a"].dtype == object
df = DataFrame.from_records(tuples, columns=["a"], coerce_float=True)
assert df["a"].dtype == np.float64
assert np.isnan(df["a"].values[-1])
def test_from_records_duplicates(self):
result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])
expected = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])
tm.assert_frame_equal(result, expected)
def test_from_records_set_index_name(self):
def create_dict(order_id):
return {
"order_id": order_id,
"quantity": np.random.randint(1, 10),
"price": np.random.randint(1, 10),
}
documents = [create_dict(i) for i in range(10)]
# demo missing data
documents.append({"order_id": 10, "quantity": 5})
result = DataFrame.from_records(documents, index="order_id")
assert result.index.name == "order_id"
# MultiIndex
result = DataFrame.from_records(documents, index=["order_id", "quantity"])
assert result.index.names == ("order_id", "quantity")
def test_from_records_misc_brokenness(self):
# GH#2179
data = {1: ["foo"], 2: ["bar"]}
result = DataFrame.from_records(data, columns=["a", "b"])
exp = DataFrame(data, columns=["a", "b"])
|
tm.assert_frame_equal(result, exp)
|
pandas._testing.assert_frame_equal
|
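# Minimal sketch of the pandas._testing.assert_frame_equal completion above:
# the helper returns None when two frames match and raises AssertionError with
# a detailed diff when they differ, which is how the from_records tests above
# check their results. The deliberate dtype mismatch below is illustrative.
import pandas as pd
import pandas._testing as tm
left = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
right = left.copy()
tm.assert_frame_equal(left, right)  # passes silently
try:
    tm.assert_frame_equal(left, right.astype({"a": "float64"}))
except AssertionError as err:
    print("mismatch reported:", err)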
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import numpy as np
import gensim
import gensim.downloader as api
import pickle
def dummy_fun(doc):
return doc
'''
Abstract base class that wraps around vectorizers.
'''
class VectorizerClassBase():
model = None
default_pickle_path = None
def print_debug_info(self):
raise NotImplementedError
def load(self, pickle_path=None):
'''
Loads model from pickle file.
:param pickle_path: File to load from. If None, loads from default path.
'''
file_path = self.default_pickle_path if pickle_path is None else pickle_path
self.model = pickle.load(open(file_path, 'rb'))
def save(self, pickle_path=None):
'''
Saves model to pickle file
:param pickle_path: File to save to. If None, saves to default path.
'''
file_path = self.default_pickle_path if pickle_path is None else pickle_path
pickle.dump(self.model, open(file_path, 'wb+'))
def fit(self, df, column_to_fit_on='clean_text'):
'''
Fits vectorizer on dataframe df.
:param df: Pandas Dataframe containing examples.
:param column_to_fit_on: name of column in df containing examples.
'''
raise NotImplementedError
def run(self, df, column_to_run_on='clean_text',label_columns=[]):
'''
Runs vectorizer on dataframe df.
:param df: Pandas Dataframe containing examples.
:param column_to_run_on: name of column in df containing examples.
:param label_columns: names of column containing human labels to copy into output df. If None, does nothing.
:return:
dataframe containing embedded data.
'''
raise NotImplementedError
class Word2VecVectorizerClass(VectorizerClassBase):
default_pickle_path = "./saved_vectorizers/Word2Vec_vectorizer.pkl"
words_found = 0
words_not_found = 0
words_not_found_list = []
# TODO(Renu): figure out if model can be pickled
def load(self):
raise NotImplementedError
# TODO(Renu): figure out if model can be pickled
def save(self):
raise NotImplementedError
def get_avg_word2vec(self,doc):
'''
Returns average of word2vec embeddings for document doc.
:param doc: list of words in document
:return: vector holding average of word2vec embeddings
'''
word_vectors = []
for word in doc:
try:
vector = self.model.get_vector(word)
word_vectors.append(vector)
self.words_found += 1
except KeyError:
self.words_not_found += 1
self.words_not_found_list.append(word)
return np.mean(word_vectors, axis=0)
def fit(self, df, column_to_fit_on='clean_text'):
path = api.load('word2vec-google-news-300', return_path=True)
self.model = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
def run(self, df, column_to_run_on='clean_text', label_columns=[]):
# reinitialize counters
self.words_found = 0
self.words_not_found = 0
self.words_not_found_list = []
list_of_averages = df[column_to_run_on].apply(lambda doc: self.get_avg_word2vec(doc)).to_list()
final_df = pd.DataFrame(list_of_averages)
for label_column in label_columns:
final_df[label_column] = df[label_column].to_list()
return final_df
def print_debug_info(self):
print("words not found ", self.words_not_found)
print("words found ", self.words_found)
print("% of words not found ", (self.words_not_found / (self.words_not_found + self.words_found)) * 100)
class TfIdfVectorizerClass(VectorizerClassBase):
default_pickle_path = "./saved_vectorizers/TfIdf_vectorizer.pkl"
def fit(self, df, column_to_fit_on='clean_text'):
self.model = TfidfVectorizer(
analyzer='word',
tokenizer=dummy_fun,
preprocessor=dummy_fun,
token_pattern=None, min_df=5)
docs = df[column_to_fit_on].to_list()
self.model.fit(docs)
def run(self, df, column_to_run_on='clean_text', label_columns=[]):
docs = df[column_to_run_on].to_list()
sparse_vectors = self.model.transform(docs)
flattened_vectors = [sparse_vector.toarray().flatten() for sparse_vector in sparse_vectors]
final_df =
|
pd.DataFrame(flattened_vectors)
|
pandas.DataFrame
|
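# Illustrative sketch (added for clarity, not part of the snippet above): the TfIdfVectorizerClass
# above passes a no-op tokenizer/preprocessor so that sklearn's TfidfVectorizer accepts documents
# that are already tokenized. The toy corpus and the names identity/docs/tfidf_df below are
# hypothetical; min_df is omitted so the tiny corpus is not filtered out.
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd

def identity(doc):
    return doc  # pass pre-tokenized documents through unchanged

docs = [["data", "science", "rocks"], ["science", "of", "data"]]
vectorizer = TfidfVectorizer(analyzer='word', tokenizer=identity, preprocessor=identity, token_pattern=None)
tfidf = vectorizer.fit_transform(docs)  # sparse matrix, one row per document
tfidf_df = pd.DataFrame(tfidf.toarray(), columns=vectorizer.get_feature_names_out())  # sklearn >= 1.0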
import numpy as np
import pandas as pd
import pytest
from etna.datasets.tsdataset import TSDataset
from etna.models.linear import ElasticMultiSegmentModel
from etna.models.linear import ElasticPerSegmentModel
from etna.models.linear import LinearMultiSegmentModel
from etna.models.linear import LinearPerSegmentModel
from etna.transforms.datetime_flags import DateFlagsTransform
from etna.transforms.lags import LagTransform
@pytest.fixture
def ts_with_categoricals(random_seed) -> TSDataset:
periods = 100
df1 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df1["segment"] = "segment_1"
df1["target"] = np.random.uniform(10, 20, size=periods)
df1["cat_feature"] = "x"
df2 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df2["segment"] = "segment_2"
df2["target"] = np.random.uniform(-15, 5, size=periods)
df1["cat_feature"] = "y"
df = pd.concat([df1, df2]).reset_index(drop=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df, freq="D")
return ts
def linear_segments_by_parameters(alpha_values, intercept_values):
dates = pd.date_range(start="2020-02-01", freq="D", periods=210)
x = np.arange(210)
train, test = [], []
for i in range(3):
train.append(pd.DataFrame())
test.append(pd.DataFrame())
train[i]["timestamp"], test[i]["timestamp"] = dates[:-7], dates[-7:]
train[i]["segment"], test[i]["segment"] = f"segment_{i}", f"segment_{i}"
alpha = alpha_values[i]
intercept = intercept_values[i]
target = x * alpha + intercept
train[i]["target"], test[i]["target"] = target[:-7], target[-7:]
train_df_all =
|
pd.concat(train, ignore_index=True)
|
pandas.concat
|
from __future__ import division # brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
from ..ted_exe import Ted
test = {}
class TestTed(unittest.TestCase):
"""
Unit tests for TED model.
"""
print("ted unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for ted unit tests.
:return:
"""
pass
def tearDown(self):
"""
Teardown routine for ted unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_ted_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty ted object
ted_empty = Ted(df_empty, df_empty)
return ted_empty
def test_daily_app_flag(self):
"""
:description generates a daily flag to denote whether a pesticide is applied that day or not (1 - applied, 0 - not applied)
:param num_apps; number of applications
:param app_interval; number of days between applications
:NOTE in TED model there are two application scenarios per simulation (one each for the min and max exposure scenarios)
(this is why the parameters are passed in)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='bool')
result = pd.Series([[]], dtype='bool')
expected_results = [[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# input variables that change per simulation
ted_empty.num_apps_min = pd.Series([3, 5, 1])
ted_empty.app_interval_min = pd.Series([3, 7, 1])
for i in range (3):
result[i] = ted_empty.daily_app_flag(ted_empty.num_apps_min[i], ted_empty.app_interval_min[i])
npt.assert_array_equal(result[i], expected_results[i])
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
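# Independent sketch of the application-flag pattern exercised above (added for clarity; the
# authoritative implementation is Ted.daily_app_flag in ted_exe). Assumes a 366-day simulation year.
import numpy as np

def sketch_daily_app_flag(num_apps, app_interval, num_days=366):
    flags = np.full(num_days, False)
    # application on day 0 and then every app_interval days, num_apps times in total
    flags[[n * app_interval for n in range(num_apps)]] = True
    return flags

# e.g. sketch_daily_app_flag(3, 3) is True on days 0, 3 and 6, matching expected_results[0] above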
def test_set_drift_parameters(self):
"""
:description provides parameter values to use when calculating distances from edge of application source area to
concentration of interest
:param app_method; application method (aerial/ground/airblast)
:param boom_hgt; height of boom (low/high) - 'NA' if not ground application
:param drop_size; droplet spectrum for application (see list below for aerial/ground - 'NA' if airblast)
:param param_a (result[i][0]); parameter a for spray drift distance calculation
:param param_b (result[i][1]); parameter b for spray drift distance calculation
:param param_c (result[i][2]); parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series(9*[[0.,0.,0.]], dtype='float')
expected_results = [[0.0292,0.822,0.6539],[0.043,1.03,0.5],[0.0721,1.0977,0.4999],[0.1014,1.1344,0.4999],
[1.0063,0.9998,1.0193],[5.5513,0.8523,1.0079],[0.1913,1.2366,1.0552],
[2.4154,0.9077,1.0128],[0.0351,2.4586,0.4763]]
try:
# input variable that change per simulation
ted_empty.app_method_min = pd.Series(['aerial','aerial','aerial','aerial','ground','ground','ground','ground','airblast'])
ted_empty.boom_hgt_min = pd.Series(['','','','','low','low','high','high',''])
ted_empty.droplet_spec_min = pd.Series(['very_fine_to_fine','fine_to_medium','medium_to_coarse','coarse_to_very_coarse',
'very_fine_to_fine','fine_to_medium-coarse','very_fine_to_fine','fine_to_medium-coarse',''])
for i in range (9): # test that the nine combinations are accessed
result[i][0], result[i][1], result[i][2] = ted_empty.set_drift_parameters(ted_empty.app_method_min[i], ted_empty.boom_hgt_min[i], ted_empty.droplet_spec_min[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range (9):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_drift_distance_calc(self):
"""
:description provides parameter values to use when calculating distances from edge of application source area to
concentration of interest
:param app_rate_frac; fraction of active ingredient application rate equivalent to the health threshold of concern
:param param_a; parameter a for spray drift distance calculation
:param param_b; parameter b for spray drift distance calculation
:param param_c; parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [302.050738, 11.484378, 0.0]
try:
# internal model constants
ted_empty.max_distance_from_source = 1000.
# input variable that is internally specified from among options
param_a = pd.Series([0.0292, 0.1913, 0.0351], dtype='float')
param_b = pd.Series([0.822, 1.2366, 2.4586], dtype='float')
param_c = pd.Series([0.6539, 1.0522, 0.4763], dtype='float')
# internally calculated variables
app_rate_frac = pd.Series([0.1,0.25,0.88], dtype='float')
for i in range(3):
result[i] = ted_empty.drift_distance_calc(app_rate_frac[i], param_a[i], param_b[i], param_c[i], ted_empty.max_distance_from_source)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
:description unittest for function conc_timestep:
:param conc_ini; initial concentration for day (actually previous day concentration)
:param half_life; halflife of pesticide representing either foliar dissipation halflife or aerobic soil metabolism halflife (days)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [9.803896e-4, 0.106066, 1.220703e-3]
try:
# input variable that is internally specified from among options
half_life = pd.Series([35., 2., .1])
# internally calculated variables
conc_ini = pd.Series([1.e-3, 0.15, 1.25])
result = ted_empty.conc_timestep(conc_ini, half_life)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
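# Worked check of the first-order dissipation relation this test appears to exercise (added for
# clarity; this is an independent sketch, not the Ted implementation). One daily timestep:
# conc = conc_ini * 0.5 ** (1 / half_life)
# e.g. 1.e-3 * 0.5 ** (1. / 35.) ~= 9.803896e-4 and 1.25 * 0.5 ** (1. / 0.1) ~= 1.220703e-3,
# matching expected_results above.
def sketch_conc_timestep(conc_ini, half_life):
    return conc_ini * 0.5 ** (1.0 / half_life)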
def test_conc_initial_canopy_air(self):
"""
:description calculates initial (1st application day) air concentration of pesticide within plant canopy (ug/mL)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param mass_pest; mass of pesticide on treated field (mg)
:param volume_air; volume of air in 1 hectare to a height equal to the height of the crop canopy
:param biotransfer_factor; the volume_based biotransfer factor; function of Henry's law constant and Log Kow
NOTE: this represents Eq 24 (and supporting eqs 25,26,27) of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [1.152526e-7, 1.281910e-5, 7.925148e-8]
try:
# internal model constants
ted_empty.hectare_to_acre = 2.47105
ted_empty.gms_to_mg = 1000.
ted_empty.lbs_to_gms = 453.592
ted_empty.crop_hgt = 1. #m
ted_empty.hectare_area = 10000. #m2
ted_empty.m3_to_liters = 1000.
ted_empty.mass_plant = 25000. # kg/hectare
ted_empty.density_plant = 0.77 #kg/L
# input variables that change per simulation
ted_empty.log_kow = pd.Series([2., 4., 6.], dtype='float')
ted_empty.log_unitless_hlc = pd.Series([-5., -3., -4.], dtype='float')
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
for i in range(3): #let's do 3 iterations
result[i] = ted_empty.conc_initial_canopy_air(i, ted_empty.app_rate_min[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial_soil_h2o(self):
"""
:description calculates initial (1st application day) concentration in soil pore water or surface puddles(ug/L)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param soil_depth
:param soil_bulk_density; kg/L
:param porosity; soil porosity
:param frac_org_cont_soil; fraction organic carbon in soil
:param app_rate_conv; conversion factor used to convert units of application rate (lbs a.i./acre) to (ug a.i./mL)
:NOTE this represents Eq 3 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
(the depth of water in this equation is assumed to be 0.0 and therefore not included here)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [5.067739e-3, 1.828522, 6.13194634]
try:
# internal model constants
ted_empty.app_rate_conv1 = 11.2
ted_empty.soil_depth = 2.6 # cm
ted_empty.soil_porosity = 0.35
ted_empty.soil_bulk_density = 1.5 # kg/L
ted_empty.soil_foc = 0.015
ted_empty.h2o_depth_soil = 0.0
ted_empty.h2o_depth_puddles = 1.3
# internally specified variable
ted_empty.water_type = pd.Series(["puddles", "pore_water", "puddles"])
# input variables that change per simulation
ted_empty.koc = pd.Series([1.e-3, 0.15, 1.25])
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
for i in range(3): #let's do 3 iterations
result[i] = ted_empty.conc_initial_soil_h2o(i, ted_empty.app_rate_min[i], ted_empty.water_type[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
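# Independent sketch of Eq 3 as the inputs and expected values above suggest it is applied (added
# for clarity; the authoritative implementation is Ted.conc_initial_soil_h2o). The default keyword
# values mirror the internal model constants set in this test; the water depth term is the puddle
# depth for 'puddles' and 0.0 for 'pore_water'.
def sketch_conc_initial_soil_h2o(app_rate, koc, water_type,
                                 app_rate_conv=11.2, soil_depth=2.6, porosity=0.35,
                                 bulk_density=1.5, foc=0.015, puddle_depth=1.3):
    h2o_depth = puddle_depth if water_type == "puddles" else 0.0
    return (app_rate * app_rate_conv) / (h2o_depth + soil_depth * (porosity + bulk_density * koc * foc))

# e.g. sketch_conc_initial_soil_h2o(0.15, 0.15, "pore_water") ~= 1.828522, matching expected_results[1]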
def test_conc_initial_plant(self):
"""
:description calculates initial (1st application day) dietary based EEC (residue concentration) from pesticide application
(mg/kg-diet for food items including short/tall grass, broadleaf plants, seeds/fruit/pods, and above ground arthropods)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [1.5e-2, 22.5, 300.]
try:
# input variables that change per simulation
ted_empty.food_multiplier = pd.Series([15., 150., 240.])
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
result = ted_empty.conc_initial_plant(ted_empty.app_rate_min, ted_empty.food_multiplier)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
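# Worked check (added for clarity; independent sketch): the application-day dietary EEC is simply
# app_rate * food_multiplier, e.g. 0.15 lbs a.i./acre * 150 = 22.5 mg/kg-diet, matching
# expected_results[1] above.
def sketch_conc_initial_plant(app_rate, food_multiplier):
    return app_rate * food_multiplier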
def test_animal_dietary_intake(self):
"""
:description generates pesticide intake via consumption of diet containing pesticide for animals (mammals, birds, amphibians, reptiles)
:param a1; coefficient of allometric expression
:param b1; exponent of allometric expression
:param body_wgt; body weight of species (g)
:param frac_h2o; fraction of water in food item
# this represents Eqs 6 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [8.050355, 3.507997, 64.92055]
try:
# internally specified parameters
a1 = pd.Series([.398, .013, .621], dtype='float')
b1 = pd.Series([.850, .773, .564], dtype='float')
# variables from external database
body_wgt = pd.Series([10., 120., 450.], dtype='float')
frac_h2o = pd.Series([0.65, 0.85, 0.7], dtype='float')
result = ted_empty.animal_dietary_intake(a1, b1, body_wgt, frac_h2o)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
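# Independent sketch of the allometric intake relation (Eq 6) as suggested by the inputs and
# expected values above (added for clarity; not the Ted implementation):
# intake (g/day-ww) = a1 * body_wgt**b1 / (1 - frac_h2o)
def sketch_animal_dietary_intake(a1, b1, body_wgt, frac_h2o):
    return (a1 * body_wgt ** b1) / (1.0 - frac_h2o)

# e.g. sketch_animal_dietary_intake(0.398, 0.850, 10., 0.65) ~= 8.05, matching expected_results[0]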
def test_animal_dietary_dose(self):
"""
:description generates pesticide dietary-based dose for animals (mammals, birds, amphibians, reptiles)
:param body_wgt; body weight of species (g)
:param frac_h2o; fraction of water in food item
:param food_intake_rate; ingestion rate of food item (g/day-ww)
:param food_pest_conc; pesticide concentration in food item (mg a.i./kg)
# this represents Eqs 5 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [3.e-4, 3.45e-2, 4.5]
try:
# variables from external database
body_wgt = pd.Series([10., 120., 450.], dtype='float')
# internally calculated variables
food_intake_rate = pd.Series([3., 12., 45.], dtype='float')
food_pest_conc = pd.Series([1.e-3, 3.45e-1, 4.50e+1], dtype='float')
result = ted_empty.animal_dietary_dose(body_wgt, food_intake_rate, food_pest_conc)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
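# Worked check of Eq 5 as exercised above (added for clarity; an independent sketch):
# dose (mg a.i./kg-bw) = food_intake_rate * food_pest_conc / body_wgt,
# e.g. 12. * 3.45e-1 / 120. = 3.45e-2, matching expected_results[1].
def sketch_animal_dietary_dose(body_wgt, food_intake_rate, food_pest_conc):
    return food_intake_rate * food_pest_conc / body_wgt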
def test_daily_plant_timeseries(self):
"""
:description generates annual timeseries of daily pesticide residue concentration (EECs) for a food item
:param i; simulation number/index
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:param daily_flag; daily flag denoting if pesticide is applied (0 - not applied, 1 - applied)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar specific
# association, rather it is one year from the day of 1st pesticide application
#expected results generated by running OPP spreadsheet with appropriate inputs
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [[2.700000E+00,2.578072E+00,2.461651E+00,5.050487E+00,4.822415E+00,4.604642E+00,7.096704E+00,
6.776228E+00,6.470225E+00,6.178040E+00,5.899049E+00,5.632658E+00,5.378296E+00,5.135421E+00,
4.903513E+00,4.682078E+00,4.470643E+00,4.268756E+00,4.075986E+00,3.891921E+00,3.716168E+00,
3.548352E+00,3.388114E+00,3.235112E+00,3.089020E+00,2.949525E+00,2.816329E+00,2.689148E+00,
2.567710E+00,2.451757E+00,2.341039E+00,2.235322E+00,2.134378E+00,2.037993E+00,1.945961E+00,
1.858084E+00,1.774176E+00,1.694057E+00,1.617556E+00,1.544510E+00,1.474762E+00,1.408164E+00,
1.344574E+00,1.283855E+00,1.225878E+00,1.170520E+00,1.117661E+00,1.067189E+00,1.018997E+00,
9.729803E-01,9.290420E-01,8.870880E-01,8.470285E-01,8.087781E-01,7.722549E-01,7.373812E-01,
7.040822E-01,6.722870E-01,6.419276E-01,6.129392E-01,5.852598E-01,5.588304E-01,5.335945E-01,
5.094983E-01,4.864901E-01,4.645210E-01,4.435440E-01,4.235143E-01,4.043890E-01,3.861275E-01,
3.686906E-01,3.520411E-01,3.361435E-01,3.209638E-01,3.064696E-01,2.926299E-01,2.794152E-01,
2.667973E-01,2.547491E-01,2.432451E-01,2.322605E-01,2.217720E-01,2.117571E-01,2.021945E-01,
1.930637E-01,1.843453E-01,1.760206E-01,1.680717E-01,1.604819E-01,1.532348E-01,1.463150E-01,
1.397076E-01,1.333986E-01,1.273746E-01,1.216225E-01,1.161303E-01,1.108860E-01,1.058786E-01,
1.010973E-01,9.653187E-02,9.217264E-02,8.801028E-02,8.403587E-02,8.024095E-02,7.661739E-02,
7.315748E-02,6.985380E-02,6.669932E-02,6.368728E-02,6.081127E-02,5.806513E-02,5.544300E-02,
5.293928E-02,5.054863E-02,4.826593E-02,4.608632E-02,4.400514E-02,4.201794E-02,4.012047E-02,
3.830870E-02,3.657874E-02,3.492690E-02,3.334966E-02,3.184364E-02,3.040563E-02,2.903256E-02,
2.772150E-02,2.646964E-02,2.527431E-02,2.413297E-02,2.304316E-02,2.200257E-02,2.100897E-02,
2.006024E-02,1.915435E-02,1.828937E-02,1.746345E-02,1.667483E-02,1.592182E-02,1.520282E-02,
1.451628E-02,1.386075E-02,1.323482E-02,1.263716E-02,1.206648E-02,1.152158E-02,1.100128E-02,
1.050448E-02,1.003012E-02,9.577174E-03,9.144684E-03,8.731725E-03,8.337415E-03,7.960910E-03,
7.601408E-03,7.258141E-03,6.930375E-03,6.617410E-03,6.318579E-03,6.033242E-03,5.760790E-03,
5.500642E-03,5.252242E-03,5.015059E-03,4.788587E-03,4.572342E-03,4.365863E-03,4.168707E-03,
3.980455E-03,3.800704E-03,3.629070E-03,3.465187E-03,3.308705E-03,3.159289E-03,3.016621E-03,
2.880395E-03,2.750321E-03,2.626121E-03,2.507530E-03,2.394294E-03,2.286171E-03,2.182931E-03,
2.084354E-03,1.990228E-03,1.900352E-03,1.814535E-03,1.732594E-03,1.654353E-03,1.579645E-03,
1.508310E-03,1.440198E-03,1.375161E-03,1.313061E-03,1.253765E-03,1.197147E-03,1.143086E-03,
1.091466E-03,1.042177E-03,9.951138E-04,9.501760E-04,9.072676E-04,8.662969E-04,8.271763E-04,
7.898223E-04,7.541552E-04,7.200988E-04,6.875803E-04,6.565303E-04,6.268824E-04,5.985734E-04,
5.715428E-04,5.457328E-04,5.210884E-04,4.975569E-04,4.750880E-04,4.536338E-04,4.331484E-04,
4.135881E-04,3.949112E-04,3.770776E-04,3.600494E-04,3.437901E-04,3.282651E-04,3.134412E-04,
2.992867E-04,2.857714E-04,2.728664E-04,2.605442E-04,2.487784E-04,2.375440E-04,2.268169E-04,
2.165742E-04,2.067941E-04,1.974556E-04,1.885388E-04,1.800247E-04,1.718951E-04,1.641326E-04,
1.567206E-04,1.496433E-04,1.428857E-04,1.364332E-04,1.302721E-04,1.243892E-04,1.187720E-04,
1.134085E-04,1.082871E-04,1.033970E-04,9.872779E-05,9.426940E-05,9.001235E-05,8.594753E-05,
8.206628E-05,7.836030E-05,7.482167E-05,7.144285E-05,6.821660E-05,6.513605E-05,6.219461E-05,
5.938600E-05,5.670423E-05,5.414355E-05,5.169852E-05,4.936390E-05,4.713470E-05,4.500617E-05,
4.297377E-05,4.103314E-05,3.918015E-05,3.741084E-05,3.572142E-05,3.410830E-05,3.256803E-05,
3.109731E-05,2.969300E-05,2.835211E-05,2.707178E-05,2.584926E-05,2.468195E-05,2.356735E-05,
2.250309E-05,2.148688E-05,2.051657E-05,1.959007E-05,1.870542E-05,1.786071E-05,1.705415E-05,
1.628401E-05,1.554865E-05,1.484650E-05,1.417606E-05,1.353589E-05,1.292463E-05,1.234097E-05,
1.178368E-05,1.125154E-05,1.074344E-05,1.025829E-05,9.795037E-06,9.352709E-06,8.930356E-06,
8.527075E-06,8.142006E-06,7.774326E-06,7.423250E-06,7.088028E-06,6.767944E-06,6.462315E-06,
6.170487E-06,5.891838E-06,5.625772E-06,5.371721E-06,5.129143E-06,4.897519E-06,4.676355E-06,
4.465178E-06,4.263538E-06,4.071003E-06,3.887163E-06,3.711625E-06,3.544014E-06,3.383972E-06,
3.231157E-06,3.085243E-06,2.945919E-06,2.812886E-06,2.685860E-06,2.564571E-06,2.448759E-06,
2.338177E-06,2.232589E-06,2.131769E-06,2.035502E-06,1.943582E-06,1.855813E-06,1.772007E-06,
1.691986E-06,1.615579E-06,1.542622E-06,1.472959E-06,1.406443E-06,1.342930E-06,1.282286E-06,
1.224380E-06,1.169089E-06,1.116294E-06,1.065884E-06,1.017751E-06,9.717908E-07,9.279063E-07,
8.860035E-07,8.459930E-07,8.077893E-07,7.713109E-07,7.364797E-07,7.032215E-07,6.714651E-07,
6.411428E-07,6.121898E-07,5.845443E-07,5.581472E-07,5.329422E-07,5.088754E-07,4.858954E-07,
4.639531E-07,4.430018E-07],
[5.500000E+01,5.349602E+01,5.203317E+01,5.061032E+01,4.922638E+01,4.788028E+01,4.657099E+01,
1.002975E+02,9.755487E+01,9.488722E+01,9.229253E+01,8.976878E+01,8.731405E+01,8.492644E+01,
1.376041E+02,1.338413E+02,1.301814E+02,1.266216E+02,1.231591E+02,1.197913E+02,1.165156E+02,
1.683295E+02,1.637265E+02,1.592494E+02,1.548947E+02,1.506591E+02,1.465394E+02,1.425322E+02,
1.936347E+02,1.883397E+02,1.831896E+02,1.781802E+02,1.733079E+02,1.685688E+02,1.639593E+02,
1.594758E+02,1.551149E+02,1.508733E+02,1.467476E+02,1.427348E+02,1.388317E+02,1.350354E+02,
1.313428E+02,1.277512E+02,1.242579E+02,1.208600E+02,1.175551E+02,1.143406E+02,1.112139E+02,
1.081728E+02,1.052148E+02,1.023377E+02,9.953925E+01,9.681734E+01,9.416987E+01,9.159479E+01,
8.909012E+01,8.665395E+01,8.428439E+01,8.197963E+01,7.973789E+01,7.755746E+01,7.543664E+01,
7.337382E+01,7.136741E+01,6.941587E+01,6.751769E+01,6.567141E+01,6.387562E+01,6.212894E+01,
6.043002E+01,5.877756E+01,5.717028E+01,5.560696E+01,5.408638E+01,5.260739E+01,5.116884E+01,
4.976962E+01,4.840867E+01,4.708493E+01,4.579739E+01,4.454506E+01,4.332697E+01,4.214220E+01,
4.098981E+01,3.986895E+01,3.877873E+01,3.771832E+01,3.668691E+01,3.568371E+01,3.470793E+01,
3.375884E+01,3.283571E+01,3.193781E+01,3.106447E+01,3.021501E+01,2.938878E+01,2.858514E+01,
2.780348E+01,2.704319E+01,2.630369E+01,2.558442E+01,2.488481E+01,2.420434E+01,2.354247E+01,
2.289870E+01,2.227253E+01,2.166349E+01,2.107110E+01,2.049491E+01,1.993447E+01,1.938936E+01,
1.885916E+01,1.834346E+01,1.784185E+01,1.735397E+01,1.687942E+01,1.641785E+01,1.596891E+01,
1.553224E+01,1.510751E+01,1.469439E+01,1.429257E+01,1.390174E+01,1.352160E+01,1.315185E+01,
1.279221E+01,1.244241E+01,1.210217E+01,1.177123E+01,1.144935E+01,1.113627E+01,1.083174E+01,
1.053555E+01,1.024745E+01,9.967237E+00,9.694682E+00,9.429580E+00,9.171728E+00,8.920927E+00,
8.676983E+00,8.439711E+00,8.208926E+00,7.984453E+00,7.766118E+00,7.553753E+00,7.347195E+00,
7.146286E+00,6.950870E+00,6.760798E+00,6.575924E+00,6.396105E+00,6.221203E+00,6.051084E+00,
5.885617E+00,5.724674E+00,5.568133E+00,5.415872E+00,5.267774E+00,5.123727E+00,4.983618E+00,
4.847341E+00,4.714790E+00,4.585864E+00,4.460463E+00,4.338492E+00,4.219855E+00,4.104463E+00,
3.992226E+00,3.883059E+00,3.776876E+00,3.673597E+00,3.573143E+00,3.475435E+00,3.380399E+00,
3.287962E+00,3.198052E+00,3.110601E+00,3.025542E+00,2.942808E+00,2.862337E+00,2.784066E+00,
2.707936E+00,2.633887E+00,2.561863E+00,2.491809E+00,2.423670E+00,2.357395E+00,2.292932E+00,
2.230232E+00,2.169246E+00,2.109928E+00,2.052232E+00,1.996113E+00,1.941529E+00,1.888438E+00,
1.836799E+00,1.786571E+00,1.737718E+00,1.690200E+00,1.643981E+00,1.599026E+00,1.555301E+00,
1.512771E+00,1.471404E+00,1.431169E+00,1.392033E+00,1.353968E+00,1.316944E+00,1.280932E+00,
1.245905E+00,1.211835E+00,1.178698E+00,1.146466E+00,1.115116E+00,1.084623E+00,1.054964E+00,
1.026116E+00,9.980566E-01,9.707647E-01,9.442191E-01,9.183994E-01,8.932857E-01,8.688588E-01,
8.450998E-01,8.219905E-01,7.995131E-01,7.776504E-01,7.563855E-01,7.357021E-01,7.155843E-01,
6.960166E-01,6.769840E-01,6.584718E-01,6.404659E-01,6.229523E-01,6.059176E-01,5.893488E-01,
5.732330E-01,5.575579E-01,5.423115E-01,5.274819E-01,5.130579E-01,4.990283E-01,4.853824E-01,
4.721095E-01,4.591997E-01,4.466428E-01,4.344294E-01,4.225499E-01,4.109952E-01,3.997565E-01,
3.888252E-01,3.781927E-01,3.678510E-01,3.577921E-01,3.480083E-01,3.384920E-01,3.292359E-01,
3.202329E-01,3.114761E-01,3.029588E-01,2.946744E-01,2.866165E-01,2.787790E-01,2.711557E-01,
2.637410E-01,2.565290E-01,2.495142E-01,2.426912E-01,2.360548E-01,2.295998E-01,2.233214E-01,
2.172147E-01,2.112749E-01,2.054976E-01,1.998783E-01,1.944126E-01,1.890964E-01,1.839255E-01,
1.788961E-01,1.740041E-01,1.692460E-01,1.646180E-01,1.601165E-01,1.557381E-01,1.514794E-01,
1.473372E-01,1.433082E-01,1.393895E-01,1.355779E-01,1.318705E-01,1.282645E-01,1.247571E-01,
1.213456E-01,1.180274E-01,1.147999E-01,1.116607E-01,1.086073E-01,1.056375E-01,1.027488E-01,
9.993914E-02,9.720630E-02,9.454818E-02,9.196276E-02,8.944803E-02,8.700207E-02,8.462300E-02,
8.230898E-02,8.005823E-02,7.786904E-02,7.573970E-02,7.366860E-02,7.165412E-02,6.969474E-02,
6.778893E-02,6.593524E-02,6.413224E-02,6.237854E-02,6.067279E-02,5.901369E-02,5.739996E-02,
5.583036E-02,5.430367E-02,5.281874E-02,5.137440E-02,4.996957E-02,4.860315E-02,4.727409E-02,
4.598138E-02,4.472402E-02,4.350104E-02,4.231150E-02,4.115449E-02,4.002912E-02,3.893452E-02,
3.786985E-02,3.683430E-02,3.582706E-02,3.484737E-02,3.389447E-02,3.296762E-02,3.206612E-02,
3.118927E-02,3.033640E-02,2.950685E-02,2.869998E-02,2.791518E-02,2.715184E-02,2.640937E-02,
2.568720E-02,2.498478E-02,2.430157E-02,2.363705E-02,2.299069E-02,2.236201E-02,2.175052E-02,
2.115575E-02,2.057724E-02,2.001456E-02,1.946726E-02,1.893493E-02,1.841715E-02,1.791353E-02,
1.742368E-02,1.694723E-02],
[3.000000E+02,2.941172E+02,2.883497E+02,2.826954E+02,2.771519E+02,2.717171E+02,2.663889E+02,
2.611652E+02,2.560439E+02,2.510230E+02,2.461006E+02,2.412747E+02,2.365435E+02,2.319050E+02,
2.273575E+02,2.228991E+02,2.185282E+02,2.142430E+02,2.100418E+02,2.059231E+02,2.018850E+02,
1.979262E+02,1.940450E+02,1.902399E+02,1.865094E+02,1.828520E+02,1.792664E+02,1.757511E+02,
1.723048E+02,1.689260E+02,1.656134E+02,1.623658E+02,1.591820E+02,1.560605E+02,1.530002E+02,
1.500000E+02,1.470586E+02,1.441749E+02,1.413477E+02,1.385759E+02,1.358585E+02,1.331944E+02,
1.305826E+02,1.280219E+02,1.255115E+02,1.230503E+02,1.206374E+02,1.182717E+02,1.159525E+02,
1.136787E+02,1.114496E+02,1.092641E+02,1.071215E+02,1.050209E+02,1.029615E+02,1.009425E+02,
9.896309E+01,9.702249E+01,9.511994E+01,9.325469E+01,9.142602E+01,8.963322E+01,8.787556E+01,
8.615238E+01,8.446298E+01,8.280671E+01,8.118292E+01,7.959098E+01,7.803025E+01,7.650012E+01,
7.500000E+01,7.352930E+01,7.208743E+01,7.067384E+01,6.928797E+01,6.792927E+01,6.659722E+01,
6.529129E+01,6.401097E+01,6.275575E+01,6.152515E+01,6.031868E+01,5.913587E+01,5.797625E+01,
5.683937E+01,5.572479E+01,5.463206E+01,5.356076E+01,5.251046E+01,5.148076E+01,5.047126E+01,
4.948155E+01,4.851124E+01,4.755997E+01,4.662735E+01,4.571301E+01,4.481661E+01,4.393778E+01,
4.307619E+01,4.223149E+01,4.140336E+01,4.059146E+01,3.979549E+01,3.901512E+01,3.825006E+01,
3.750000E+01,3.676465E+01,3.604372E+01,3.533692E+01,3.464398E+01,3.396464E+01,3.329861E+01,
3.264565E+01,3.200548E+01,3.137788E+01,3.076258E+01,3.015934E+01,2.956793E+01,2.898813E+01,
2.841969E+01,2.786239E+01,2.731603E+01,2.678038E+01,2.625523E+01,2.574038E+01,2.523563E+01,
2.474077E+01,2.425562E+01,2.377998E+01,2.331367E+01,2.285651E+01,2.240830E+01,2.196889E+01,
2.153809E+01,2.111575E+01,2.070168E+01,2.029573E+01,1.989774E+01,1.950756E+01,1.912503E+01,
1.875000E+01,1.838232E+01,1.802186E+01,1.766846E+01,1.732199E+01,1.698232E+01,1.664931E+01,
1.632282E+01,1.600274E+01,1.568894E+01,1.538129E+01,1.507967E+01,1.478397E+01,1.449406E+01,
1.420984E+01,1.393120E+01,1.365801E+01,1.339019E+01,1.312762E+01,1.287019E+01,1.261781E+01,
1.237039E+01,1.212781E+01,1.188999E+01,1.165684E+01,1.142825E+01,1.120415E+01,1.098445E+01,
1.076905E+01,1.055787E+01,1.035084E+01,1.014787E+01,9.948872E+00,9.753781E+00,9.562515E+00,
9.375000E+00,9.191162E+00,9.010929E+00,8.834230E+00,8.660996E+00,8.491159E+00,8.324653E+00,
8.161412E+00,8.001371E+00,7.844469E+00,7.690644E+00,7.539835E+00,7.391984E+00,7.247031E+00,
7.104921E+00,6.965598E+00,6.829007E+00,6.695094E+00,6.563808E+00,6.435095E+00,6.308907E+00,
6.185193E+00,6.063905E+00,5.944996E+00,5.828418E+00,5.714127E+00,5.602076E+00,5.492223E+00,
5.384524E+00,5.278936E+00,5.175420E+00,5.073933E+00,4.974436E+00,4.876890E+00,4.781258E+00,
4.687500E+00,4.595581E+00,4.505464E+00,4.417115E+00,4.330498E+00,4.245580E+00,4.162326E+00,
4.080706E+00,4.000686E+00,3.922235E+00,3.845322E+00,3.769918E+00,3.695992E+00,3.623516E+00,
3.552461E+00,3.482799E+00,3.414504E+00,3.347547E+00,3.281904E+00,3.217548E+00,3.154454E+00,
3.092597E+00,3.031953E+00,2.972498E+00,2.914209E+00,2.857063E+00,2.801038E+00,2.746111E+00,
2.692262E+00,2.639468E+00,2.587710E+00,2.536966E+00,2.487218E+00,2.438445E+00,2.390629E+00,
2.343750E+00,2.297790E+00,2.252732E+00,2.208558E+00,2.165249E+00,2.122790E+00,2.081163E+00,
2.040353E+00,2.000343E+00,1.961117E+00,1.922661E+00,1.884959E+00,1.847996E+00,1.811758E+00,
1.776230E+00,1.741400E+00,1.707252E+00,1.673774E+00,1.640952E+00,1.608774E+00,1.577227E+00,
1.546298E+00,1.515976E+00,1.486249E+00,1.457105E+00,1.428532E+00,1.400519E+00,1.373056E+00,
1.346131E+00,1.319734E+00,1.293855E+00,1.268483E+00,1.243609E+00,1.219223E+00,1.195314E+00,
1.171875E+00,1.148895E+00,1.126366E+00,1.104279E+00,1.082625E+00,1.061395E+00,1.040582E+00,
1.020176E+00,1.000171E+00,9.805587E-01,9.613305E-01,9.424794E-01,9.239979E-01,9.058789E-01,
8.881152E-01,8.706998E-01,8.536259E-01,8.368868E-01,8.204760E-01,8.043869E-01,7.886134E-01,
7.731492E-01,7.579882E-01,7.431245E-01,7.285523E-01,7.142658E-01,7.002595E-01,6.865278E-01,
6.730654E-01,6.598670E-01,6.469274E-01,6.342416E-01,6.218045E-01,6.096113E-01,5.976572E-01,
5.859375E-01,5.744476E-01,5.631831E-01,5.521394E-01,5.413123E-01,5.306975E-01,5.202908E-01,
5.100882E-01,5.000857E-01,4.902793E-01,4.806652E-01,4.712397E-01,4.619990E-01,4.529395E-01,
4.440576E-01,4.353499E-01,4.268129E-01,4.184434E-01,4.102380E-01,4.021935E-01,3.943067E-01,
3.865746E-01,3.789941E-01,3.715622E-01,3.642761E-01,3.571329E-01,3.501297E-01,3.432639E-01,
3.365327E-01,3.299335E-01,3.234637E-01,3.171208E-01,3.109023E-01,3.048056E-01,2.988286E-01,
2.929687E-01,2.872238E-01,2.815915E-01,2.760697E-01,2.706561E-01,2.653487E-01,2.601454E-01,
2.550441E-01,2.500429E-01,2.451397E-01,2.403326E-01,2.356198E-01,2.309995E-01,2.264697E-01,
2.220288E-01,2.176749E-01]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# internally specified variable (from internal database)
food_multiplier = pd.Series([15., 110., 240.])
# input variables that change per simulation
ted_empty.foliar_diss_hlife = pd.Series([15., 25., 35.])
ted_empty.app_rate_min = pd.Series([0.18, 0.5, 1.25]) # lbs a.i./acre
# application scenarios generated from 'daily_app_flag' tests and reused here
daily_flag = pd.Series([[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]], dtype='bool')
for i in range(3):
result[i] = ted_empty.daily_plant_timeseries(i, ted_empty.app_rate_min[i], food_multiplier[i], daily_flag[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
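# Independent sketch of the residue timeseries pattern the expected values above follow (added for
# clarity; the authoritative code is Ted.daily_plant_timeseries): the residue is topped up by
# app_rate * food_multiplier on each flagged application day and decays daily with the foliar
# dissipation half-life.
import numpy as np

def sketch_daily_plant_timeseries(app_rate, food_multiplier, half_life, daily_flag):
    conc = np.zeros(len(daily_flag))
    for day, applied in enumerate(daily_flag):
        carryover = conc[day - 1] * 0.5 ** (1.0 / half_life) if day > 0 else 0.0
        conc[day] = carryover + (app_rate * food_multiplier if applied else 0.0)
    return conc

# e.g. with app_rate=0.18, food_multiplier=15., half_life=15. and applications on days 0, 3 and 6,
# day 3 gives 2.461651 * 0.5**(1/15.) + 2.7 ~= 5.050487, matching expected_results[0] above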
def test_daily_soil_h2o_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in soil pore water and surface puddles
:param i; simulation number/index
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:param daily_flag; daily flag denoting if pesticide is applied (0 - not applied, 1 - applied)
:param water_type; type of water (pore water or surface puddles)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar specific
# association, rather it is one year from the day of 1st pesticide application
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [[2.235571E-02,2.134616E-02,2.038220E-02,4.181749E-02,3.992908E-02,3.812594E-02,
5.875995E-02,5.610644E-02,5.357277E-02,5.115350E-02,4.884349E-02,4.663780E-02,
4.453171E-02,4.252073E-02,4.060056E-02,3.876711E-02,3.701645E-02,3.534484E-02,
3.374873E-02,3.222469E-02,3.076947E-02,2.937997E-02,2.805322E-02,2.678638E-02,
2.557675E-02,2.442175E-02,2.331890E-02,2.226586E-02,2.126037E-02,2.030028E-02,
1.938355E-02,1.850822E-02,1.767242E-02,1.687436E-02,1.611234E-02,1.538474E-02,
1.468999E-02,1.402661E-02,1.339319E-02,1.278838E-02,1.221087E-02,1.165945E-02,
1.113293E-02,1.063018E-02,1.015014E-02,9.691777E-03,9.254112E-03,8.836211E-03,
8.437182E-03,8.056172E-03,7.692368E-03,7.344993E-03,7.013305E-03,6.696596E-03,
6.394188E-03,6.105437E-03,5.829725E-03,5.566464E-03,5.315091E-03,5.075070E-03,
4.845888E-03,4.627056E-03,4.418105E-03,4.218591E-03,4.028086E-03,3.846184E-03,
3.672497E-03,3.506653E-03,3.348298E-03,3.197094E-03,3.052718E-03,2.914863E-03,
2.783232E-03,2.657546E-03,2.537535E-03,2.422944E-03,2.313528E-03,2.209053E-03,
2.109295E-03,2.014043E-03,1.923092E-03,1.836248E-03,1.753326E-03,1.674149E-03,
1.598547E-03,1.526359E-03,1.457431E-03,1.391616E-03,1.328773E-03,1.268768E-03,
1.211472E-03,1.156764E-03,1.104526E-03,1.054648E-03,1.007022E-03,9.615460E-04,
9.181242E-04,8.766632E-04,8.370745E-04,7.992735E-04,7.631796E-04,7.287156E-04,
6.958080E-04,6.643864E-04,6.343838E-04,6.057361E-04,5.783820E-04,5.522632E-04,
5.273239E-04,5.035108E-04,4.807730E-04,4.590621E-04,4.383316E-04,4.185372E-04,
3.996368E-04,3.815898E-04,3.643578E-04,3.479040E-04,3.321932E-04,3.171919E-04,
3.028680E-04,2.891910E-04,2.761316E-04,2.636619E-04,2.517554E-04,2.403865E-04,
2.295310E-04,2.191658E-04,2.092686E-04,1.998184E-04,1.907949E-04,1.821789E-04,
1.739520E-04,1.660966E-04,1.585960E-04,1.514340E-04,1.445955E-04,1.380658E-04,
1.318310E-04,1.258777E-04,1.201933E-04,1.147655E-04,1.095829E-04,1.046343E-04,
9.990919E-05,9.539745E-05,9.108945E-05,8.697600E-05,8.304830E-05,7.929798E-05,
7.571701E-05,7.229775E-05,6.903290E-05,6.591548E-05,6.293885E-05,6.009663E-05,
5.738276E-05,5.479145E-05,5.231715E-05,4.995459E-05,4.769873E-05,4.554473E-05,
4.348800E-05,4.152415E-05,3.964899E-05,3.785850E-05,3.614887E-05,3.451645E-05,
3.295774E-05,3.146942E-05,3.004831E-05,2.869138E-05,2.739572E-05,2.615858E-05,
2.497730E-05,2.384936E-05,2.277236E-05,2.174400E-05,2.076208E-05,1.982449E-05,
1.892925E-05,1.807444E-05,1.725822E-05,1.647887E-05,1.573471E-05,1.502416E-05,
1.434569E-05,1.369786E-05,1.307929E-05,1.248865E-05,1.192468E-05,1.138618E-05,
1.087200E-05,1.038104E-05,9.912247E-06,9.464626E-06,9.037219E-06,8.629112E-06,
8.239435E-06,7.867356E-06,7.512079E-06,7.172845E-06,6.848931E-06,6.539644E-06,
6.244324E-06,5.962341E-06,5.693091E-06,5.436000E-06,5.190519E-06,4.956124E-06,
4.732313E-06,4.518609E-06,4.314556E-06,4.119718E-06,3.933678E-06,3.756039E-06,
3.586423E-06,3.424465E-06,3.269822E-06,3.122162E-06,2.981170E-06,2.846545E-06,
2.718000E-06,2.595260E-06,2.478062E-06,2.366156E-06,2.259305E-06,2.157278E-06,
2.059859E-06,1.966839E-06,1.878020E-06,1.793211E-06,1.712233E-06,1.634911E-06,
1.561081E-06,1.490585E-06,1.423273E-06,1.359000E-06,1.297630E-06,1.239031E-06,
1.183078E-06,1.129652E-06,1.078639E-06,1.029929E-06,9.834195E-07,9.390098E-07,
8.966056E-07,8.561164E-07,8.174555E-07,7.805405E-07,7.452926E-07,7.116364E-07,
6.795000E-07,6.488149E-07,6.195154E-07,5.915391E-07,5.648262E-07,5.393195E-07,
5.149647E-07,4.917097E-07,4.695049E-07,4.483028E-07,4.280582E-07,4.087278E-07,
3.902703E-07,3.726463E-07,3.558182E-07,3.397500E-07,3.244074E-07,3.097577E-07,
2.957696E-07,2.824131E-07,2.696598E-07,2.574824E-07,2.458549E-07,2.347525E-07,
2.241514E-07,2.140291E-07,2.043639E-07,1.951351E-07,1.863231E-07,1.779091E-07,
1.698750E-07,1.622037E-07,1.548789E-07,1.478848E-07,1.412065E-07,1.348299E-07,
1.287412E-07,1.229274E-07,1.173762E-07,1.120757E-07,1.070145E-07,1.021819E-07,
9.756757E-08,9.316157E-08,8.895455E-08,8.493750E-08,8.110186E-08,7.743943E-08,
7.394239E-08,7.060327E-08,6.741494E-08,6.437059E-08,6.146372E-08,5.868811E-08,
5.603785E-08,5.350727E-08,5.109097E-08,4.878378E-08,4.658079E-08,4.447727E-08,
4.246875E-08,4.055093E-08,3.871971E-08,3.697119E-08,3.530163E-08,3.370747E-08,
3.218529E-08,3.073186E-08,2.934406E-08,2.801893E-08,2.675364E-08,2.554549E-08,
2.439189E-08,2.329039E-08,2.223864E-08,2.123438E-08,2.027546E-08,1.935986E-08,
1.848560E-08,1.765082E-08,1.685373E-08,1.609265E-08,1.536593E-08,1.467203E-08,
1.400946E-08,1.337682E-08,1.277274E-08,1.219595E-08,1.164520E-08,1.111932E-08,
1.061719E-08,1.013773E-08,9.679929E-09,9.242799E-09,8.825409E-09,8.426867E-09,
8.046324E-09,7.682965E-09,7.336014E-09,7.004732E-09,6.688409E-09,6.386371E-09,
6.097973E-09,5.822598E-09,5.559659E-09,5.308594E-09,5.068866E-09,4.839964E-09,
4.621399E-09,4.412704E-09,4.213434E-09,4.023162E-09,3.841482E-09,3.668007E-09],
[9.391514E-02,8.762592E-02,8.175787E-02,7.628279E-02,7.117436E-02,6.640803E-02,
6.196088E-02,1.517267E-01,1.415660E-01,1.320858E-01,1.232404E-01,1.149873E-01,
1.072870E-01,1.001023E-01,1.873139E-01,1.747700E-01,1.630662E-01,1.521461E-01,
1.419574E-01,1.324509E-01,1.235811E-01,2.092203E-01,1.952095E-01,1.821369E-01,
1.699397E-01,1.585594E-01,1.479411E-01,1.380340E-01,2.227054E-01,2.077915E-01,
1.938763E-01,1.808930E-01,1.687791E-01,1.574765E-01,1.469307E-01,1.370912E-01,
1.279106E-01,1.193449E-01,1.113527E-01,1.038957E-01,9.693814E-02,9.044648E-02,
8.438955E-02,7.873824E-02,7.346537E-02,6.854562E-02,6.395532E-02,5.967242E-02,
5.567634E-02,5.194786E-02,4.846907E-02,4.522324E-02,4.219478E-02,3.936912E-02,
3.673269E-02,3.427281E-02,3.197766E-02,2.983621E-02,2.783817E-02,2.597393E-02,
2.423454E-02,2.261162E-02,2.109739E-02,1.968456E-02,1.836634E-02,1.713640E-02,
1.598883E-02,1.491811E-02,1.391909E-02,1.298697E-02,1.211727E-02,1.130581E-02,
1.054869E-02,9.842280E-03,9.183172E-03,8.568202E-03,7.994415E-03,7.459053E-03,
6.959543E-03,6.493483E-03,6.058634E-03,5.652905E-03,5.274347E-03,4.921140E-03,
4.591586E-03,4.284101E-03,3.997208E-03,3.729527E-03,3.479771E-03,3.246741E-03,
3.029317E-03,2.826453E-03,2.637174E-03,2.460570E-03,2.295793E-03,2.142051E-03,
1.998604E-03,1.864763E-03,1.739886E-03,1.623371E-03,1.514658E-03,1.413226E-03,
1.318587E-03,1.230285E-03,1.147896E-03,1.071025E-03,9.993019E-04,9.323816E-04,
8.699428E-04,8.116854E-04,7.573292E-04,7.066131E-04,6.592934E-04,6.151425E-04,
5.739482E-04,5.355126E-04,4.996509E-04,4.661908E-04,4.349714E-04,4.058427E-04,
3.786646E-04,3.533066E-04,3.296467E-04,3.075712E-04,2.869741E-04,2.677563E-04,
2.498255E-04,2.330954E-04,2.174857E-04,2.029213E-04,1.893323E-04,1.766533E-04,
1.648233E-04,1.537856E-04,1.434871E-04,1.338782E-04,1.249127E-04,1.165477E-04,
1.087429E-04,1.014607E-04,9.466615E-05,8.832664E-05,8.241167E-05,7.689281E-05,
7.174353E-05,6.693908E-05,6.245637E-05,5.827385E-05,5.437143E-05,5.073034E-05,
4.733308E-05,4.416332E-05,4.120584E-05,3.844640E-05,3.587176E-05,3.346954E-05,
3.122818E-05,2.913693E-05,2.718571E-05,2.536517E-05,2.366654E-05,2.208166E-05,
2.060292E-05,1.922320E-05,1.793588E-05,1.673477E-05,1.561409E-05,1.456846E-05,
1.359286E-05,1.268258E-05,1.183327E-05,1.104083E-05,1.030146E-05,9.611601E-06,
8.967941E-06,8.367385E-06,7.807046E-06,7.284232E-06,6.796428E-06,6.341292E-06,
5.916635E-06,5.520415E-06,5.150730E-06,4.805801E-06,4.483971E-06,4.183692E-06,
3.903523E-06,3.642116E-06,3.398214E-06,3.170646E-06,2.958317E-06,2.760208E-06,
2.575365E-06,2.402900E-06,2.241985E-06,2.091846E-06,1.951762E-06,1.821058E-06,
1.699107E-06,1.585323E-06,1.479159E-06,1.380104E-06,1.287682E-06,1.201450E-06,
1.120993E-06,1.045923E-06,9.758808E-07,9.105289E-07,8.495535E-07,7.926615E-07,
7.395793E-07,6.900519E-07,6.438412E-07,6.007251E-07,5.604963E-07,5.229616E-07,
4.879404E-07,4.552645E-07,4.247768E-07,3.963307E-07,3.697897E-07,3.450260E-07,
3.219206E-07,3.003625E-07,2.802482E-07,2.614808E-07,2.439702E-07,2.276322E-07,
2.123884E-07,1.981654E-07,1.848948E-07,1.725130E-07,1.609603E-07,1.501813E-07,
1.401241E-07,1.307404E-07,1.219851E-07,1.138161E-07,1.061942E-07,9.908269E-08,
9.244741E-08,8.625649E-08,8.048015E-08,7.509063E-08,7.006204E-08,6.537019E-08,
6.099255E-08,5.690806E-08,5.309710E-08,4.954134E-08,4.622371E-08,4.312824E-08,
4.024007E-08,3.754532E-08,3.503102E-08,3.268510E-08,3.049627E-08,2.845403E-08,
2.654855E-08,2.477067E-08,2.311185E-08,2.156412E-08,2.012004E-08,1.877266E-08,
1.751551E-08,1.634255E-08,1.524814E-08,1.422702E-08,1.327427E-08,1.238534E-08,
1.155593E-08,1.078206E-08,1.006002E-08,9.386329E-09,8.757755E-09,8.171274E-09,
7.624068E-09,7.113507E-09,6.637137E-09,6.192668E-09,5.777963E-09,5.391030E-09,
5.030009E-09,4.693165E-09,4.378877E-09,4.085637E-09,3.812034E-09,3.556754E-09,
3.318569E-09,3.096334E-09,2.888982E-09,2.695515E-09,2.515005E-09,2.346582E-09,
2.189439E-09,2.042819E-09,1.906017E-09,1.778377E-09,1.659284E-09,1.548167E-09,
1.444491E-09,1.347758E-09,1.257502E-09,1.173291E-09,1.094719E-09,1.021409E-09,
9.530086E-10,8.891884E-10,8.296421E-10,7.740835E-10,7.222454E-10,6.738788E-10,
6.287512E-10,5.866456E-10,5.473597E-10,5.107046E-10,4.765043E-10,4.445942E-10,
4.148211E-10,3.870417E-10,3.611227E-10,3.369394E-10,3.143756E-10,2.933228E-10,
2.736798E-10,2.553523E-10,2.382521E-10,2.222971E-10,2.074105E-10,1.935209E-10,
1.805614E-10,1.684697E-10,1.571878E-10,1.466614E-10,1.368399E-10,1.276762E-10,
1.191261E-10,1.111486E-10,1.037053E-10,9.676043E-11,9.028068E-11,8.423485E-11,
7.859390E-11,7.333070E-11,6.841996E-11,6.383808E-11,5.956303E-11,5.557428E-11,
5.185263E-11,4.838022E-11,4.514034E-11,4.211743E-11,3.929695E-11,3.666535E-11,
3.420998E-11,3.191904E-11,2.978152E-11,2.778714E-11,2.592632E-11,2.419011E-11,
2.257017E-11,2.105871E-11,1.964847E-11,1.833267E-11,1.710499E-11,1.595952E-11],
[1.172251E-01,1.132320E-01,1.093749E-01,1.056492E-01,1.020504E-01,9.857420E-02,
9.521640E-02,9.197298E-02,8.884005E-02,8.581383E-02,8.289069E-02,8.006713E-02,
7.733975E-02,7.470528E-02,7.216054E-02,6.970249E-02,6.732817E-02,6.503472E-02,
6.281940E-02,6.067954E-02,5.861257E-02,5.661601E-02,5.468746E-02,5.282461E-02,
5.102521E-02,4.928710E-02,4.760820E-02,4.598649E-02,4.442002E-02,4.290691E-02,
4.144535E-02,4.003357E-02,3.866988E-02,3.735264E-02,3.608027E-02,3.485124E-02,
3.366408E-02,3.251736E-02,3.140970E-02,3.033977E-02,2.930629E-02,2.830801E-02,
2.734373E-02,2.641230E-02,2.551260E-02,2.464355E-02,2.380410E-02,2.299325E-02,
2.221001E-02,2.145346E-02,2.072267E-02,2.001678E-02,1.933494E-02,1.867632E-02,
1.804014E-02,1.742562E-02,1.683204E-02,1.625868E-02,1.570485E-02,1.516989E-02,
1.465314E-02,1.415400E-02,1.367187E-02,1.320615E-02,1.275630E-02,1.232178E-02,
1.190205E-02,1.149662E-02,1.110501E-02,1.072673E-02,1.036134E-02,1.000839E-02,
9.667469E-03,9.338160E-03,9.020068E-03,8.712811E-03,8.416021E-03,8.129340E-03,
7.852425E-03,7.584943E-03,7.326572E-03,7.077002E-03,6.835933E-03,6.603076E-03,
6.378151E-03,6.160888E-03,5.951025E-03,5.748312E-03,5.552503E-03,5.363364E-03,
5.180668E-03,5.004196E-03,4.833735E-03,4.669080E-03,4.510034E-03,4.356406E-03,
4.208010E-03,4.064670E-03,3.926212E-03,3.792471E-03,3.663286E-03,3.538501E-03,
3.417966E-03,3.301538E-03,3.189075E-03,3.080444E-03,2.975513E-03,2.874156E-03,
2.776251E-03,2.681682E-03,2.590334E-03,2.502098E-03,2.416867E-03,2.334540E-03,
2.255017E-03,2.178203E-03,2.104005E-03,2.032335E-03,1.963106E-03,1.896236E-03,
1.831643E-03,1.769250E-03,1.708983E-03,1.650769E-03,1.594538E-03,1.540222E-03,
1.487756E-03,1.437078E-03,1.388126E-03,1.340841E-03,1.295167E-03,1.251049E-03,
1.208434E-03,1.167270E-03,1.127508E-03,1.089101E-03,1.052003E-03,1.016168E-03,
9.815531E-04,9.481178E-04,9.158214E-04,8.846252E-04,8.544916E-04,8.253845E-04,
7.972689E-04,7.701110E-04,7.438782E-04,7.185389E-04,6.940629E-04,6.704205E-04,
6.475836E-04,6.255245E-04,6.042168E-04,5.836350E-04,5.637542E-04,5.445507E-04,
5.260013E-04,5.080838E-04,4.907766E-04,4.740589E-04,4.579107E-04,4.423126E-04,
4.272458E-04,4.126923E-04,3.986344E-04,3.850555E-04,3.719391E-04,3.592695E-04,
3.470314E-04,3.352103E-04,3.237918E-04,3.127622E-04,3.021084E-04,2.918175E-04,
2.818771E-04,2.722753E-04,2.630006E-04,2.540419E-04,2.453883E-04,2.370295E-04,
2.289554E-04,2.211563E-04,2.136229E-04,2.063461E-04,1.993172E-04,1.925277E-04,
1.859695E-04,1.796347E-04,1.735157E-04,1.676051E-04,1.618959E-04,1.563811E-04,
1.510542E-04,1.459087E-04,1.409386E-04,1.361377E-04,1.315003E-04,1.270209E-04,
1.226941E-04,1.185147E-04,1.144777E-04,1.105782E-04,1.068115E-04,1.031731E-04,
9.965861E-05,9.626387E-05,9.298477E-05,8.981737E-05,8.675786E-05,8.380257E-05,
8.094794E-05,7.819056E-05,7.552710E-05,7.295437E-05,7.046928E-05,6.806884E-05,
6.575016E-05,6.351047E-05,6.134707E-05,5.925736E-05,5.723884E-05,5.528908E-05,
5.340573E-05,5.158653E-05,4.982930E-05,4.813194E-05,4.649239E-05,4.490868E-05,
4.337893E-05,4.190128E-05,4.047397E-05,3.909528E-05,3.776355E-05,3.647719E-05,
3.523464E-05,3.403442E-05,3.287508E-05,3.175523E-05,3.067354E-05,2.962868E-05,
2.861942E-05,2.764454E-05,2.670286E-05,2.579327E-05,2.491465E-05,2.406597E-05,
2.324619E-05,2.245434E-05,2.168946E-05,2.095064E-05,2.023699E-05,1.954764E-05,
1.888178E-05,1.823859E-05,1.761732E-05,1.701721E-05,1.643754E-05,1.587762E-05,
1.533677E-05,1.481434E-05,1.430971E-05,1.382227E-05,1.335143E-05,1.289663E-05,
1.245733E-05,1.203298E-05,1.162310E-05,1.122717E-05,1.084473E-05,1.047532E-05,
1.011849E-05,9.773820E-06,9.440888E-06,9.119297E-06,8.808660E-06,8.508605E-06,
8.218770E-06,7.938809E-06,7.668384E-06,7.407170E-06,7.154855E-06,6.911134E-06,
6.675716E-06,6.448316E-06,6.228663E-06,6.016492E-06,5.811548E-06,5.613585E-06,
5.422366E-06,5.237660E-06,5.059247E-06,4.886910E-06,4.720444E-06,4.559648E-06,
4.404330E-06,4.254302E-06,4.109385E-06,3.969404E-06,3.834192E-06,3.703585E-06,
3.577428E-06,3.455567E-06,3.337858E-06,3.224158E-06,3.114332E-06,3.008246E-06,
2.905774E-06,2.806793E-06,2.711183E-06,2.618830E-06,2.529623E-06,2.443455E-06,
2.360222E-06,2.279824E-06,2.202165E-06,2.127151E-06,2.054693E-06,1.984702E-06,
1.917096E-06,1.851793E-06,1.788714E-06,1.727784E-06,1.668929E-06,1.612079E-06,
1.557166E-06,1.504123E-06,1.452887E-06,1.403396E-06,1.355592E-06,1.309415E-06,
1.264812E-06,1.221728E-06,1.180111E-06,1.139912E-06,1.101082E-06,1.063576E-06,
1.027346E-06,9.923511E-07,9.585480E-07,9.258963E-07,8.943569E-07,8.638918E-07,
8.344645E-07,8.060396E-07,7.785829E-07,7.520615E-07,7.264435E-07,7.016982E-07,
6.777958E-07,6.547076E-07,6.324058E-07,6.108638E-07,5.900555E-07,5.699560E-07,
5.505412E-07,5.317878E-07,5.136731E-07,4.961755E-07,4.792740E-07,4.629482E-07,
4.471784E-07,4.319459E-07,4.172322E-07,4.030198E-07,3.892914E-07,3.760307E-07]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.app_rate_conv1 = 11.2
ted_empty.h2o_depth_puddles = 1.3
ted_empty.soil_depth = 2.6
ted_empty.soil_porosity = 0.4339623
ted_empty.soil_bulk_density = 1.5
ted_empty.h2o_depth_soil = 0.0
ted_empty.soil_foc = 0.015
# internally specified variable
water_type = ['puddles', 'pore_water', 'puddles']
# input variables that change per simulation
ted_empty.aerobic_soil_meta_hlife = pd.Series([15., 10., 20.], dtype='float')
ted_empty.koc = pd.Series([1500., 1000., 2000.], dtype='float')
ted_empty.app_rate_min = pd.Series([0.18, 0.5, 1.25]) # lbs a.i./acre
# application scenarios generated from 'daily_app_flag' tests and reused here
daily_flag = pd.Series([[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]], dtype='bool')
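            # Hedged reference sketch (an assumption, not the model's source code): the expected
            # values above are consistent with a day-of-application water concentration of
            #     conc_0 = app_rate_conv1 * app_rate_min / (water_depth + soil_depth *
            #              (soil_porosity + soil_bulk_density * koc * soil_foc))
            # where water_depth is h2o_depth_puddles for 'puddles' and h2o_depth_soil for
            # 'pore_water', followed by first-order decay conc_t = conc_0 * exp(-ln(2) * t /
            # aerobic_soil_meta_hlife), with a fresh application added on each day flagged True
            # in daily_flag. Example (simulation 1, 'pore_water'):
            #     11.2 * 0.5 / (0.0 + 2.6 * (0.4339623 + 1.5 * 1000. * 0.015)) ~= 9.3915E-02,
            # which matches the first entry of the second expected series above.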
for i in range(3):
result[i] = ted_empty.daily_soil_h2o_timeseries(i, ted_empty.app_rate_min[i], daily_flag[i], water_type[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_plant_dew_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in dew that resides on broad leaf plants
:param i; simulation number/index
:param blp_conc; daily values of pesticide concentration in broad leaf plant dew
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
        # note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
        # association; rather, it is one year from the day of the 1st pesticide application
        # this represents Eq 11 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
        # create empty pandas dataframes needed to create an empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([[]], dtype='float')
expected_results = [[6.201749E+00,6.080137E+00,5.960909E+00,5.844019E+00,5.729422E+00,5.617071E+00,
5.506924E+00,1.160069E+01,1.137320E+01,1.115018E+01,1.093153E+01,1.071717E+01,
1.050702E+01,1.030098E+01,1.630073E+01,1.598109E+01,1.566771E+01,1.536047E+01,
1.505926E+01,1.476396E+01,1.447445E+01,2.039236E+01,1.999248E+01,1.960044E+01,
1.921609E+01,1.883927E+01,1.846984E+01,1.810766E+01,2.395433E+01,2.348460E+01,
2.302408E+01,2.257259E+01,2.212996E+01,2.169600E+01,2.127056E+01,2.085346E+01,
2.044453E+01,2.004363E+01,1.965059E+01,1.926525E+01,1.888747E+01,1.851710E+01,
1.815399E+01,1.779800E+01,1.744899E+01,1.710683E+01,1.677137E+01,1.644250E+01,
1.612007E+01,1.580396E+01,1.549406E+01,1.519023E+01,1.489236E+01,1.460033E+01,
1.431403E+01,1.403334E+01,1.375815E+01,1.348836E+01,1.322386E+01,1.296455E+01,
1.271032E+01,1.246108E+01,1.221673E+01,1.197717E+01,1.174230E+01,1.151204E+01,
1.128630E+01,1.106498E+01,1.084800E+01,1.063528E+01,1.042673E+01,1.022227E+01,
1.002181E+01,9.825293E+00,9.632625E+00,9.443735E+00,9.258549E+00,9.076994E+00,
8.899000E+00,8.724496E+00,8.553414E+00,8.385687E+00,8.221249E+00,8.060035E+00,
7.901982E+00,7.747029E+00,7.595115E+00,7.446179E+00,7.300164E+00,7.157013E+00,
7.016668E+00,6.879075E+00,6.744181E+00,6.611932E+00,6.482276E+00,6.355162E+00,
6.230541E+00,6.108364E+00,5.988583E+00,5.871150E+00,5.756021E+00,5.643149E+00,
5.532490E+00,5.424001E+00,5.317640E+00,5.213364E+00,5.111133E+00,5.010907E+00,
4.912646E+00,4.816312E+00,4.721867E+00,4.629274E+00,4.538497E+00,4.449500E+00,
4.362248E+00,4.276707E+00,4.192843E+00,4.110624E+00,4.030017E+00,3.950991E+00,
3.873515E+00,3.797557E+00,3.723090E+00,3.650082E+00,3.578506E+00,3.508334E+00,
3.439538E+00,3.372090E+00,3.305966E+00,3.241138E+00,3.177581E+00,3.115271E+00,
3.054182E+00,2.994291E+00,2.935575E+00,2.878010E+00,2.821574E+00,2.766245E+00,
2.712001E+00,2.658820E+00,2.606682E+00,2.555567E+00,2.505454E+00,2.456323E+00,
2.408156E+00,2.360934E+00,2.314637E+00,2.269249E+00,2.224750E+00,2.181124E+00,
2.138353E+00,2.096422E+00,2.055312E+00,2.015009E+00,1.975496E+00,1.936757E+00,
1.898779E+00,1.861545E+00,1.825041E+00,1.789253E+00,1.754167E+00,1.719769E+00,
1.686045E+00,1.652983E+00,1.620569E+00,1.588791E+00,1.557635E+00,1.527091E+00,
1.497146E+00,1.467788E+00,1.439005E+00,1.410787E+00,1.383122E+00,1.356000E+00,
1.329410E+00,1.303341E+00,1.277783E+00,1.252727E+00,1.228162E+00,1.204078E+00,
1.180467E+00,1.157319E+00,1.134624E+00,1.112375E+00,1.090562E+00,1.069177E+00,
1.048211E+00,1.027656E+00,1.007504E+00,9.877478E-01,9.683787E-01,9.493894E-01,
9.307724E-01,9.125205E-01,8.946266E-01,8.770835E-01,8.598844E-01,8.430226E-01,
8.264914E-01,8.102845E-01,7.943953E-01,7.788177E-01,7.635455E-01,7.485729E-01,
7.338938E-01,7.195026E-01,7.053936E-01,6.915612E-01,6.780002E-01,6.647050E-01,
6.516705E-01,6.388917E-01,6.263634E-01,6.140808E-01,6.020390E-01,5.902334E-01,
5.786593E-01,5.673121E-01,5.561875E-01,5.452810E-01,5.345884E-01,5.241054E-01,
5.138280E-01,5.037522E-01,4.938739E-01,4.841893E-01,4.746947E-01,4.653862E-01,
4.562603E-01,4.473133E-01,4.385417E-01,4.299422E-01,4.215113E-01,4.132457E-01,
4.051422E-01,3.971976E-01,3.894088E-01,3.817728E-01,3.742864E-01,3.669469E-01,
3.597513E-01,3.526968E-01,3.457806E-01,3.390001E-01,3.323525E-01,3.258353E-01,
3.194458E-01,3.131817E-01,3.070404E-01,3.010195E-01,2.951167E-01,2.893296E-01,
2.836561E-01,2.780937E-01,2.726405E-01,2.672942E-01,2.620527E-01,2.569140E-01,
2.518761E-01,2.469370E-01,2.420947E-01,2.373473E-01,2.326931E-01,2.281301E-01,
2.236566E-01,2.192709E-01,2.149711E-01,2.107557E-01,2.066229E-01,2.025711E-01,
1.985988E-01,1.947044E-01,1.908864E-01,1.871432E-01,1.834735E-01,1.798756E-01,
1.763484E-01,1.728903E-01,1.695000E-01,1.661762E-01,1.629176E-01,1.597229E-01,
1.565908E-01,1.535202E-01,1.505098E-01,1.475584E-01,1.446648E-01,1.418280E-01,
1.390469E-01,1.363202E-01,1.336471E-01,1.310264E-01,1.284570E-01,1.259380E-01,
1.234685E-01,1.210473E-01,1.186737E-01,1.163466E-01,1.140651E-01,1.118283E-01,
1.096354E-01,1.074856E-01,1.053778E-01,1.033114E-01,1.012856E-01,9.929941E-02,
9.735221E-02,9.544319E-02,9.357161E-02,9.173673E-02,8.993782E-02,8.817420E-02,
8.644516E-02,8.475002E-02,8.308812E-02,8.145882E-02,7.986146E-02,7.829542E-02,
7.676010E-02,7.525488E-02,7.377918E-02,7.233241E-02,7.091402E-02,6.952344E-02,
6.816012E-02,6.682355E-02,6.551318E-02,6.422850E-02,6.296902E-02,6.173424E-02,
6.052367E-02,5.933684E-02,5.817328E-02,5.703253E-02,5.591416E-02,5.481772E-02,
5.374278E-02,5.268891E-02,5.165572E-02,5.064278E-02,4.964970E-02,4.867610E-02,
4.772160E-02,4.678580E-02,4.586836E-02,4.496891E-02,4.408710E-02,4.322258E-02,
4.237501E-02,4.154406E-02,4.072941E-02,3.993073E-02,3.914771E-02,3.838005E-02,
3.762744E-02,3.688959E-02,3.616621E-02,3.545701E-02,3.476172E-02,3.408006E-02,
3.341177E-02,3.275659E-02,3.211425E-02,3.148451E-02,3.086712E-02,3.026183E-02],
[3.487500E-01,3.419112E-01,3.352066E-01,3.286334E-01,3.221891E-01,3.158711E-01,
3.096771E-01,6.523545E-01,6.395622E-01,6.270208E-01,6.147253E-01,6.026709E-01,
5.908529E-01,5.792667E-01,9.166576E-01,8.986825E-01,8.810599E-01,8.637828E-01,
8.468446E-01,8.302385E-01,8.139580E-01,1.000000E+00,1.000000E+00,1.000000E+00,
1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,
1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,
1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,1.000000E+00,
1.000000E+00,1.000000E+00,9.812289E-01,9.619876E-01,9.431236E-01,9.246296E-01,
9.064981E-01,8.887223E-01,8.712950E-01,8.542094E-01,8.374589E-01,8.210368E-01,
8.049368E-01,7.891525E-01,7.736777E-01,7.585063E-01,7.436325E-01,7.290503E-01,
7.147541E-01,7.007382E-01,6.869971E-01,6.735255E-01,6.603181E-01,6.473697E-01,
6.346751E-01,6.222295E-01,6.100280E-01,5.980657E-01,5.863380E-01,5.748403E-01,
5.635680E-01,5.525168E-01,5.416823E-01,5.310602E-01,5.206465E-01,5.104369E-01,
5.004275E-01,4.906145E-01,4.809938E-01,4.715618E-01,4.623148E-01,4.532491E-01,
4.443611E-01,4.356475E-01,4.271047E-01,4.187294E-01,4.105184E-01,4.024684E-01,
3.945762E-01,3.868388E-01,3.792532E-01,3.718162E-01,3.645251E-01,3.573770E-01,
3.503691E-01,3.434986E-01,3.367628E-01,3.301591E-01,3.236848E-01,3.173376E-01,
3.111148E-01,3.050140E-01,2.990329E-01,2.931690E-01,2.874201E-01,2.817840E-01,
2.762584E-01,2.708411E-01,2.655301E-01,2.603232E-01,2.552184E-01,2.502138E-01,
2.453072E-01,2.404969E-01,2.357809E-01,2.311574E-01,2.266245E-01,2.221806E-01,
2.178237E-01,2.135523E-01,2.093647E-01,2.052592E-01,2.012342E-01,1.972881E-01,
1.934194E-01,1.896266E-01,1.859081E-01,1.822626E-01,1.786885E-01,1.751845E-01,
1.717493E-01,1.683814E-01,1.650795E-01,1.618424E-01,1.586688E-01,1.555574E-01,
1.525070E-01,1.495164E-01,1.465845E-01,1.437101E-01,1.408920E-01,1.381292E-01,
1.354206E-01,1.327651E-01,1.301616E-01,1.276092E-01,1.251069E-01,1.226536E-01,
1.202485E-01,1.178905E-01,1.155787E-01,1.133123E-01,1.110903E-01,1.089119E-01,
1.067762E-01,1.046824E-01,1.026296E-01,1.006171E-01,9.864406E-02,9.670971E-02,
9.481329E-02,9.295406E-02,9.113129E-02,8.934426E-02,8.759227E-02,8.587464E-02,
8.419069E-02,8.253976E-02,8.092121E-02,7.933439E-02,7.777869E-02,7.625350E-02,
7.475822E-02,7.329225E-02,7.185504E-02,7.044600E-02,6.906460E-02,6.771029E-02,
6.638253E-02,6.508081E-02,6.380461E-02,6.255344E-02,6.132681E-02,6.012423E-02,
5.894523E-02,5.778935E-02,5.665613E-02,5.554514E-02,5.445593E-02,5.338809E-02,
5.234118E-02,5.131480E-02,5.030855E-02,4.932203E-02,4.835485E-02,4.740664E-02,
4.647703E-02,4.556564E-02,4.467213E-02,4.379614E-02,4.293732E-02,4.209535E-02,
4.126988E-02,4.046060E-02,3.966720E-02,3.888935E-02,3.812675E-02,3.737911E-02,
3.664613E-02,3.592752E-02,3.522300E-02,3.453230E-02,3.385514E-02,3.319126E-02,
3.254040E-02,3.190231E-02,3.127672E-02,3.066340E-02,3.006211E-02,2.947261E-02,
2.889467E-02,2.832807E-02,2.777257E-02,2.722797E-02,2.669404E-02,2.617059E-02,
2.565740E-02,2.515427E-02,2.466101E-02,2.417743E-02,2.370332E-02,2.323851E-02,
2.278282E-02,2.233606E-02,2.189807E-02,2.146866E-02,2.104767E-02,2.063494E-02,
2.023030E-02,1.983360E-02,1.944467E-02,1.906338E-02,1.868955E-02,1.832306E-02,
1.796376E-02,1.761150E-02,1.726615E-02,1.692757E-02,1.659563E-02,1.627020E-02,
1.595115E-02,1.563836E-02,1.533170E-02,1.503106E-02,1.473631E-02,1.444734E-02,
1.416403E-02,1.388629E-02,1.361398E-02,1.334702E-02,1.308529E-02,1.282870E-02,
1.257714E-02,1.233051E-02,1.208871E-02,1.185166E-02,1.161926E-02,1.139141E-02,
1.116803E-02,1.094903E-02,1.073433E-02,1.052384E-02,1.031747E-02,1.011515E-02,
9.916799E-03,9.722337E-03,9.531688E-03,9.344777E-03,9.161532E-03,8.981880E-03,
8.805750E-03,8.633075E-03,8.463786E-03,8.297816E-03,8.135101E-03,7.975577E-03,
7.819180E-03,7.665851E-03,7.515528E-03,7.368153E-03,7.223668E-03,7.082017E-03,
6.943143E-03,6.806992E-03,6.673511E-03,6.542647E-03,6.414350E-03,6.288569E-03,
6.165254E-03,6.044357E-03,5.925831E-03,5.809629E-03,5.695705E-03,5.584016E-03,
5.474517E-03,5.367165E-03,5.261918E-03,5.158735E-03,5.057576E-03,4.958400E-03,
4.861168E-03,4.765844E-03,4.672389E-03,4.580766E-03,4.490940E-03,4.402875E-03,
4.316538E-03,4.231893E-03,4.148908E-03,4.067550E-03,3.987788E-03,3.909590E-03,
3.832926E-03,3.757764E-03,3.684077E-03,3.611834E-03,3.541008E-03,3.471571E-03,
3.403496E-03,3.336755E-03,3.271324E-03,3.207175E-03,3.144284E-03,3.082627E-03,
3.022178E-03,2.962915E-03,2.904814E-03,2.847853E-03,2.792008E-03,2.737258E-03,
2.683583E-03,2.630959E-03,2.579368E-03,2.528788E-03,2.479200E-03,2.430584E-03,
2.382922E-03,2.336194E-03,2.290383E-03,2.245470E-03,2.201438E-03,2.158269E-03,
2.115946E-03,2.074454E-03,2.033775E-03,1.993894E-03,1.954795E-03,1.916463E-03,
1.878882E-03,1.842038E-03,1.805917E-03,1.770504E-03,1.735786E-03,1.701748E-03],
[8.718750E-02,8.547781E-02,8.380164E-02,8.215834E-02,8.054726E-02,7.896778E-02,
7.741927E-02,1.630886E-01,1.598906E-01,1.567552E-01,1.536813E-01,1.506677E-01,
1.477132E-01,1.448167E-01,2.291644E-01,2.246706E-01,2.202650E-01,2.159457E-01,
2.117111E-01,2.075596E-01,2.034895E-01,2.866867E-01,2.810649E-01,2.755534E-01,
2.701500E-01,2.648525E-01,2.596589E-01,2.545672E-01,3.367628E-01,3.301591E-01,
3.236848E-01,3.173376E-01,3.111148E-01,3.050140E-01,2.990329E-01,3.803565E-01,
3.728980E-01,3.655857E-01,3.584167E-01,3.513884E-01,3.444979E-01,3.377425E-01,
4.183071E-01,4.101043E-01,4.020624E-01,3.941782E-01,3.864486E-01,3.788706E-01,
3.714412E-01,3.641575E-01,3.570166E-01,3.500157E-01,3.431521E-01,3.364231E-01,
3.298260E-01,3.233583E-01,3.170175E-01,3.108010E-01,3.047063E-01,2.987312E-01,
2.928733E-01,2.871302E-01,2.814998E-01,2.759797E-01,2.705680E-01,2.652623E-01,
2.600606E-01,2.549610E-01,2.499614E-01,2.450598E-01,2.402543E-01,2.355431E-01,
2.309242E-01,2.263959E-01,2.219565E-01,2.176040E-01,2.133369E-01,2.091535E-01,
2.050522E-01,2.010312E-01,1.970891E-01,1.932243E-01,1.894353E-01,1.857206E-01,
1.820787E-01,1.785083E-01,1.750078E-01,1.715760E-01,1.682115E-01,1.649130E-01,
1.616792E-01,1.585087E-01,1.554005E-01,1.523532E-01,1.493656E-01,1.464367E-01,
1.435651E-01,1.407499E-01,1.379899E-01,1.352840E-01,1.326311E-01,1.300303E-01,
1.274805E-01,1.249807E-01,1.225299E-01,1.201272E-01,1.177715E-01,1.154621E-01,
1.131980E-01,1.109782E-01,1.088020E-01,1.066685E-01,1.045768E-01,1.025261E-01,
1.005156E-01,9.854456E-02,9.661216E-02,9.471765E-02,9.286030E-02,9.103937E-02,
8.925414E-02,8.750392E-02,8.578802E-02,8.410577E-02,8.245651E-02,8.083959E-02,
7.925437E-02,7.770024E-02,7.617659E-02,7.468281E-02,7.321833E-02,7.178256E-02,
7.037495E-02,6.899494E-02,6.764199E-02,6.631557E-02,6.501516E-02,6.374025E-02,
6.249035E-02,6.126495E-02,6.006358E-02,5.888577E-02,5.773106E-02,5.659899E-02,
5.548911E-02,5.440101E-02,5.333424E-02,5.228838E-02,5.126304E-02,5.025780E-02,
4.927228E-02,4.830608E-02,4.735883E-02,4.643015E-02,4.551968E-02,4.462707E-02,
4.375196E-02,4.289401E-02,4.205289E-02,4.122825E-02,4.041979E-02,3.962719E-02,
3.885012E-02,3.808829E-02,3.734141E-02,3.660916E-02,3.589128E-02,3.518747E-02,
3.449747E-02,3.382099E-02,3.315779E-02,3.250758E-02,3.187013E-02,3.124517E-02,
3.063247E-02,3.003179E-02,2.944289E-02,2.886553E-02,2.829949E-02,2.774456E-02,
2.720050E-02,2.666712E-02,2.614419E-02,2.563152E-02,2.512890E-02,2.463614E-02,
2.415304E-02,2.367941E-02,2.321507E-02,2.275984E-02,2.231353E-02,2.187598E-02,
2.144701E-02,2.102644E-02,2.061413E-02,2.020990E-02,1.981359E-02,1.942506E-02,
1.904415E-02,1.867070E-02,1.830458E-02,1.794564E-02,1.759374E-02,1.724873E-02,
1.691050E-02,1.657889E-02,1.625379E-02,1.593506E-02,1.562259E-02,1.531624E-02,
1.501590E-02,1.472144E-02,1.443276E-02,1.414975E-02,1.387228E-02,1.360025E-02,
1.333356E-02,1.307210E-02,1.281576E-02,1.256445E-02,1.231807E-02,1.207652E-02,
1.183971E-02,1.160754E-02,1.137992E-02,1.115677E-02,1.093799E-02,1.072350E-02,
1.051322E-02,1.030706E-02,1.010495E-02,9.906796E-03,9.712530E-03,9.522073E-03,
9.335351E-03,9.152291E-03,8.972820E-03,8.796868E-03,8.624367E-03,8.455249E-03,
8.289446E-03,8.126895E-03,7.967532E-03,7.811293E-03,7.658119E-03,7.507948E-03,
7.360721E-03,7.216382E-03,7.074873E-03,6.936139E-03,6.800126E-03,6.666780E-03,
6.536048E-03,6.407880E-03,6.282226E-03,6.159035E-03,6.038260E-03,5.919853E-03,
5.803769E-03,5.689960E-03,5.578384E-03,5.468995E-03,5.361751E-03,5.256611E-03,
5.153532E-03,5.052474E-03,4.953398E-03,4.856265E-03,4.761037E-03,4.667676E-03,
4.576145E-03,4.486410E-03,4.398434E-03,4.312184E-03,4.227624E-03,4.144723E-03,
4.063448E-03,3.983766E-03,3.905647E-03,3.829059E-03,3.753974E-03,3.680361E-03,
3.608191E-03,3.537437E-03,3.468070E-03,3.400063E-03,3.333390E-03,3.268024E-03,
3.203940E-03,3.141113E-03,3.079517E-03,3.019130E-03,2.959927E-03,2.901884E-03,
2.844980E-03,2.789192E-03,2.734497E-03,2.680876E-03,2.628305E-03,2.576766E-03,
2.526237E-03,2.476699E-03,2.428133E-03,2.380518E-03,2.333838E-03,2.288073E-03,
2.243205E-03,2.199217E-03,2.156092E-03,2.113812E-03,2.072362E-03,2.031724E-03,
1.991883E-03,1.952823E-03,1.914530E-03,1.876987E-03,1.840180E-03,1.804096E-03,
1.768718E-03,1.734035E-03,1.700031E-03,1.666695E-03,1.634012E-03,1.601970E-03,
1.570556E-03,1.539759E-03,1.509565E-03,1.479963E-03,1.450942E-03,1.422490E-03,
1.394596E-03,1.367249E-03,1.340438E-03,1.314153E-03,1.288383E-03,1.263119E-03,
1.238350E-03,1.214066E-03,1.190259E-03,1.166919E-03,1.144036E-03,1.121602E-03,
1.099609E-03,1.078046E-03,1.056906E-03,1.036181E-03,1.015862E-03,9.959415E-04,
9.764117E-04,9.572648E-04,9.384935E-04,9.200902E-04,9.020478E-04,8.843592E-04,
8.670174E-04,8.500157E-04,8.333474E-04,8.170060E-04,8.009850E-04,7.852782E-04,
7.698794E-04,7.547825E-04,7.399817E-04,7.254711E-04,7.112450E-04,6.972980E-04]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.frac_pest_on_surface = 0.62
ted_empty.density_h2o = 1.0
ted_empty.mass_wax = 0.012
# input variables that change per simulation
ted_empty.solubility = pd.Series([145., 1., 20.], dtype='float')
ted_empty.log_kow = pd.Series([2.75, 4., 5.], dtype='float')
# internally calculated variables
blp_conc = pd.Series([[6.750000E+01,6.617637E+01,6.487869E+01,6.360646E+01,6.235917E+01,6.113635E+01,5.993750E+01,
1.262622E+02,1.237862E+02,1.213589E+02,1.189791E+02,1.166460E+02,1.143586E+02,1.121161E+02,
1.774176E+02,1.739385E+02,1.705277E+02,1.671838E+02,1.639054E+02,1.606913E+02,1.575403E+02,
2.219510E+02,2.175987E+02,2.133317E+02,2.091484E+02,2.050471E+02,2.010263E+02,1.970843E+02,
2.607196E+02,2.556070E+02,2.505947E+02,2.456807E+02,2.408631E+02,2.361399E+02,2.315093E+02,
2.269696E+02,2.225188E+02,2.181554E+02,2.138775E+02,2.096835E+02,2.055717E+02,2.015406E+02,
1.975885E+02,1.937139E+02,1.899153E+02,1.861912E+02,1.825401E+02,1.789606E+02,1.754513E+02,
1.720108E+02,1.686377E+02,1.653309E+02,1.620888E+02,1.589104E+02,1.557942E+02,1.527392E+02,
1.497441E+02,1.468077E+02,1.439289E+02,1.411065E+02,1.383395E+02,1.356267E+02,1.329672E+02,
1.303598E+02,1.278035E+02,1.252974E+02,1.228404E+02,1.204315E+02,1.180699E+02,1.157547E+02,
1.134848E+02,1.112594E+02,1.090777E+02,1.069387E+02,1.048417E+02,1.027859E+02,1.007703E+02,
9.879424E+01,9.685694E+01,9.495764E+01,9.309558E+01,9.127003E+01,8.948028E+01,8.772563E+01,
8.600538E+01,8.431887E+01,8.266543E+01,8.104441E+01,7.945518E+01,7.789711E+01,7.636959E+01,
7.487203E+01,7.340384E+01,7.196443E+01,7.055325E+01,6.916975E+01,6.781337E+01,6.648359E+01,
6.517989E+01,6.390175E+01,6.264868E+01,6.142018E+01,6.021576E+01,5.903497E+01,5.787733E+01,
5.674239E+01,5.562971E+01,5.453884E+01,5.346937E+01,5.242087E+01,5.139293E+01,5.038514E+01,
4.939712E+01,4.842847E+01,4.747882E+01,4.654779E+01,4.563501E+01,4.474014E+01,4.386281E+01,
4.300269E+01,4.215943E+01,4.133271E+01,4.052220E+01,3.972759E+01,3.894855E+01,3.818480E+01,
3.743602E+01,3.670192E+01,3.598222E+01,3.527663E+01,3.458487E+01,3.390669E+01,3.324180E+01,
3.258994E+01,3.195088E+01,3.132434E+01,3.071009E+01,3.010788E+01,2.951748E+01,2.893866E+01,
2.837119E+01,2.781485E+01,2.726942E+01,2.673468E+01,2.621043E+01,2.569646E+01,2.519257E+01,
2.469856E+01,2.421424E+01,2.373941E+01,2.327389E+01,2.281751E+01,2.237007E+01,2.193141E+01,
2.150135E+01,2.107972E+01,2.066636E+01,2.026110E+01,1.986379E+01,1.947428E+01,1.909240E+01,
1.871801E+01,1.835096E+01,1.799111E+01,1.763831E+01,1.729244E+01,1.695334E+01,1.662090E+01,
1.629497E+01,1.597544E+01,1.566217E+01,1.535504E+01,1.505394E+01,1.475874E+01,1.446933E+01,
1.418560E+01,1.390743E+01,1.363471E+01,1.336734E+01,1.310522E+01,1.284823E+01,1.259629E+01,
1.234928E+01,1.210712E+01,1.186970E+01,1.163695E+01,1.140875E+01,1.118503E+01,1.096570E+01,
1.075067E+01,1.053986E+01,1.033318E+01,1.013055E+01,9.931897E+00,9.737138E+00,9.546199E+00,
9.359004E+00,9.175480E+00,8.995554E+00,8.819157E+00,8.646218E+00,8.476671E+00,8.310449E+00,
8.147486E+00,7.987719E+00,7.831085E+00,7.677522E+00,7.526970E+00,7.379371E+00,7.234666E+00,
7.092799E+00,6.953713E+00,6.817355E+00,6.683671E+00,6.552608E+00,6.424116E+00,6.298143E+00,
6.174640E+00,6.053559E+00,5.934852E+00,5.818474E+00,5.704377E+00,5.592517E+00,5.482852E+00,
5.375336E+00,5.269929E+00,5.166589E+00,5.065275E+00,4.965948E+00,4.868569E+00,4.773100E+00,
4.679502E+00,4.587740E+00,4.497777E+00,4.409578E+00,4.323109E+00,4.238336E+00,4.155225E+00,
4.073743E+00,3.993859E+00,3.915542E+00,3.838761E+00,3.763485E+00,3.689686E+00,3.617333E+00,
3.546399E+00,3.476857E+00,3.408678E+00,3.341835E+00,3.276304E+00,3.212058E+00,3.149071E+00,
3.087320E+00,3.026779E+00,2.967426E+00,2.909237E+00,2.852188E+00,2.796259E+00,2.741426E+00,
2.687668E+00,2.634965E+00,2.583295E+00,2.532638E+00,2.482974E+00,2.434285E+00,2.386550E+00,
2.339751E+00,2.293870E+00,2.248889E+00,2.204789E+00,2.161555E+00,2.119168E+00,2.077612E+00,
2.036872E+00,1.996930E+00,1.957771E+00,1.919380E+00,1.881743E+00,1.844843E+00,1.808667E+00,
1.773200E+00,1.738428E+00,1.704339E+00,1.670918E+00,1.638152E+00,1.606029E+00,1.574536E+00,
1.543660E+00,1.513390E+00,1.483713E+00,1.454618E+00,1.426094E+00,1.398129E+00,1.370713E+00,
1.343834E+00,1.317482E+00,1.291647E+00,1.266319E+00,1.241487E+00,1.217142E+00,1.193275E+00,
1.169876E+00,1.146935E+00,1.124444E+00,1.102395E+00,1.080777E+00,1.059584E+00,1.038806E+00,
1.018436E+00,9.984649E-01,9.788856E-01,9.596902E-01,9.408713E-01,9.224214E-01,9.043333E-01,
8.865998E-01,8.692142E-01,8.521694E-01,8.354589E-01,8.190760E-01,8.030145E-01,7.872678E-01,
7.718300E-01,7.566949E-01,7.418565E-01,7.273092E-01,7.130471E-01,6.990647E-01,6.853565E-01,
6.719170E-01,6.587411E-01,6.458236E-01,6.331594E-01,6.207436E-01,6.085712E-01,5.966374E-01,
5.849378E-01,5.734675E-01,5.622221E-01,5.511973E-01,5.403887E-01,5.297920E-01,5.194031E-01,
5.092179E-01,4.992324E-01,4.894428E-01,4.798451E-01,4.704356E-01,4.612107E-01,4.521666E-01,
4.432999E-01,4.346071E-01,4.260847E-01,4.177294E-01,4.095380E-01,4.015072E-01,3.936339E-01,
3.859150E-01,3.783474E-01,3.709283E-01,3.636546E-01,3.565236E-01,3.495323E-01,3.426782E-01,
3.359585E-01,3.293706E-01],
[6.750000E+01,6.617637E+01,6.487869E+01,6.360646E+01,6.235917E+01,6.113635E+01,5.993750E+01,
1.262622E+02,1.237862E+02,1.213589E+02,1.189791E+02,1.166460E+02,1.143586E+02,1.121161E+02,
1.774176E+02,1.739385E+02,1.705277E+02,1.671838E+02,1.639054E+02,1.606913E+02,1.575403E+02,
2.219510E+02,2.175987E+02,2.133317E+02,2.091484E+02,2.050471E+02,2.010263E+02,1.970843E+02,
2.607196E+02,2.556070E+02,2.505947E+02,2.456807E+02,2.408631E+02,2.361399E+02,2.315093E+02,
2.269696E+02,2.225188E+02,2.181554E+02,2.138775E+02,2.096835E+02,2.055717E+02,2.015406E+02,
1.975885E+02,1.937139E+02,1.899153E+02,1.861912E+02,1.825401E+02,1.789606E+02,1.754513E+02,
1.720108E+02,1.686377E+02,1.653309E+02,1.620888E+02,1.589104E+02,1.557942E+02,1.527392E+02,
1.497441E+02,1.468077E+02,1.439289E+02,1.411065E+02,1.383395E+02,1.356267E+02,1.329672E+02,
1.303598E+02,1.278035E+02,1.252974E+02,1.228404E+02,1.204315E+02,1.180699E+02,1.157547E+02,
1.134848E+02,1.112594E+02,1.090777E+02,1.069387E+02,1.048417E+02,1.027859E+02,1.007703E+02,
9.879424E+01,9.685694E+01,9.495764E+01,9.309558E+01,9.127003E+01,8.948028E+01,8.772563E+01,
8.600538E+01,8.431887E+01,8.266543E+01,8.104441E+01,7.945518E+01,7.789711E+01,7.636959E+01,
7.487203E+01,7.340384E+01,7.196443E+01,7.055325E+01,6.916975E+01,6.781337E+01,6.648359E+01,
6.517989E+01,6.390175E+01,6.264868E+01,6.142018E+01,6.021576E+01,5.903497E+01,5.787733E+01,
5.674239E+01,5.562971E+01,5.453884E+01,5.346937E+01,5.242087E+01,5.139293E+01,5.038514E+01,
4.939712E+01,4.842847E+01,4.747882E+01,4.654779E+01,4.563501E+01,4.474014E+01,4.386281E+01,
4.300269E+01,4.215943E+01,4.133271E+01,4.052220E+01,3.972759E+01,3.894855E+01,3.818480E+01,
3.743602E+01,3.670192E+01,3.598222E+01,3.527663E+01,3.458487E+01,3.390669E+01,3.324180E+01,
3.258994E+01,3.195088E+01,3.132434E+01,3.071009E+01,3.010788E+01,2.951748E+01,2.893866E+01,
2.837119E+01,2.781485E+01,2.726942E+01,2.673468E+01,2.621043E+01,2.569646E+01,2.519257E+01,
2.469856E+01,2.421424E+01,2.373941E+01,2.327389E+01,2.281751E+01,2.237007E+01,2.193141E+01,
2.150135E+01,2.107972E+01,2.066636E+01,2.026110E+01,1.986379E+01,1.947428E+01,1.909240E+01,
1.871801E+01,1.835096E+01,1.799111E+01,1.763831E+01,1.729244E+01,1.695334E+01,1.662090E+01,
1.629497E+01,1.597544E+01,1.566217E+01,1.535504E+01,1.505394E+01,1.475874E+01,1.446933E+01,
1.418560E+01,1.390743E+01,1.363471E+01,1.336734E+01,1.310522E+01,1.284823E+01,1.259629E+01,
1.234928E+01,1.210712E+01,1.186970E+01,1.163695E+01,1.140875E+01,1.118503E+01,1.096570E+01,
1.075067E+01,1.053986E+01,1.033318E+01,1.013055E+01,9.931897E+00,9.737138E+00,9.546199E+00,
9.359004E+00,9.175480E+00,8.995554E+00,8.819157E+00,8.646218E+00,8.476671E+00,8.310449E+00,
8.147486E+00,7.987719E+00,7.831085E+00,7.677522E+00,7.526970E+00,7.379371E+00,7.234666E+00,
7.092799E+00,6.953713E+00,6.817355E+00,6.683671E+00,6.552608E+00,6.424116E+00,6.298143E+00,
6.174640E+00,6.053559E+00,5.934852E+00,5.818474E+00,5.704377E+00,5.592517E+00,5.482852E+00,
5.375336E+00,5.269929E+00,5.166589E+00,5.065275E+00,4.965948E+00,4.868569E+00,4.773100E+00,
4.679502E+00,4.587740E+00,4.497777E+00,4.409578E+00,4.323109E+00,4.238336E+00,4.155225E+00,
4.073743E+00,3.993859E+00,3.915542E+00,3.838761E+00,3.763485E+00,3.689686E+00,3.617333E+00,
3.546399E+00,3.476857E+00,3.408678E+00,3.341835E+00,3.276304E+00,3.212058E+00,3.149071E+00,
3.087320E+00,3.026779E+00,2.967426E+00,2.909237E+00,2.852188E+00,2.796259E+00,2.741426E+00,
2.687668E+00,2.634965E+00,2.583295E+00,2.532638E+00,2.482974E+00,2.434285E+00,2.386550E+00,
2.339751E+00,2.293870E+00,2.248889E+00,2.204789E+00,2.161555E+00,2.119168E+00,2.077612E+00,
2.036872E+00,1.996930E+00,1.957771E+00,1.919380E+00,1.881743E+00,1.844843E+00,1.808667E+00,
1.773200E+00,1.738428E+00,1.704339E+00,1.670918E+00,1.638152E+00,1.606029E+00,1.574536E+00,
1.543660E+00,1.513390E+00,1.483713E+00,1.454618E+00,1.426094E+00,1.398129E+00,1.370713E+00,
1.343834E+00,1.317482E+00,1.291647E+00,1.266319E+00,1.241487E+00,1.217142E+00,1.193275E+00,
1.169876E+00,1.146935E+00,1.124444E+00,1.102395E+00,1.080777E+00,1.059584E+00,1.038806E+00,
1.018436E+00,9.984649E-01,9.788856E-01,9.596902E-01,9.408713E-01,9.224214E-01,9.043333E-01,
8.865998E-01,8.692142E-01,8.521694E-01,8.354589E-01,8.190760E-01,8.030145E-01,7.872678E-01,
7.718300E-01,7.566949E-01,7.418565E-01,7.273092E-01,7.130471E-01,6.990647E-01,6.853565E-01,
6.719170E-01,6.587411E-01,6.458236E-01,6.331594E-01,6.207436E-01,6.085712E-01,5.966374E-01,
5.849378E-01,5.734675E-01,5.622221E-01,5.511973E-01,5.403887E-01,5.297920E-01,5.194031E-01,
5.092179E-01,4.992324E-01,4.894428E-01,4.798451E-01,4.704356E-01,4.612107E-01,4.521666E-01,
4.432999E-01,4.346071E-01,4.260847E-01,4.177294E-01,4.095380E-01,4.015072E-01,3.936339E-01,
3.859150E-01,3.783474E-01,3.709283E-01,3.636546E-01,3.565236E-01,3.495323E-01,3.426782E-01,
3.359585E-01,3.293706E-01],
[1.687500E+02,1.654409E+02,1.621967E+02,1.590161E+02,1.558979E+02,1.528409E+02,1.498438E+02,
3.156554E+02,3.094656E+02,3.033972E+02,2.974477E+02,2.916150E+02,2.858966E+02,2.802903E+02,
4.435440E+02,4.348464E+02,4.263193E+02,4.179594E+02,4.097635E+02,4.017283E+02,3.938506E+02,
5.548775E+02,5.439967E+02,5.333292E+02,5.228710E+02,5.126178E+02,5.025657E+02,4.927107E+02,
6.517989E+02,6.390175E+02,6.264868E+02,6.142018E+02,6.021576E+02,5.903497E+02,5.787733E+02,
7.361739E+02,7.217380E+02,7.075851E+02,6.937098E+02,6.801066E+02,6.667701E+02,6.536952E+02,
8.096266E+02,7.937503E+02,7.781854E+02,7.629256E+02,7.479651E+02,7.332980E+02,7.189184E+02,
7.048209E+02,6.909998E+02,6.774497E+02,6.641653E+02,6.511414E+02,6.383730E+02,6.258549E+02,
6.135822E+02,6.015503E+02,5.897542E+02,5.781895E+02,5.668516E+02,5.557359E+02,5.448383E+02,
5.341544E+02,5.236799E+02,5.134109E+02,5.033432E+02,4.934729E+02,4.837962E+02,4.743093E+02,
4.650084E+02,4.558898E+02,4.469501E+02,4.381857E+02,4.295931E+02,4.211691E+02,4.129102E+02,
4.048133E+02,3.968752E+02,3.890927E+02,3.814628E+02,3.739826E+02,3.666490E+02,3.594592E+02,
3.524104E+02,3.454999E+02,3.387249E+02,3.320827E+02,3.255707E+02,3.191865E+02,3.129274E+02,
3.067911E+02,3.007751E+02,2.948771E+02,2.890947E+02,2.834258E+02,2.778680E+02,2.724191E+02,
2.670772E+02,2.618400E+02,2.567054E+02,2.516716E+02,2.467365E+02,2.418981E+02,2.371546E+02,
2.325042E+02,2.279449E+02,2.234751E+02,2.190929E+02,2.147966E+02,2.105845E+02,2.064551E+02,
2.024067E+02,1.984376E+02,1.945463E+02,1.907314E+02,1.869913E+02,1.833245E+02,1.797296E+02,
1.762052E+02,1.727499E+02,1.693624E+02,1.660413E+02,1.627854E+02,1.595932E+02,1.564637E+02,
1.533956E+02,1.503876E+02,1.474386E+02,1.445474E+02,1.417129E+02,1.389340E+02,1.362096E+02,
1.335386E+02,1.309200E+02,1.283527E+02,1.258358E+02,1.233682E+02,1.209491E+02,1.185773E+02,
1.162521E+02,1.139725E+02,1.117375E+02,1.095464E+02,1.073983E+02,1.052923E+02,1.032276E+02,
1.012033E+02,9.921879E+01,9.727317E+01,9.536570E+01,9.349564E+01,9.166225E+01,8.986481E+01,
8.810261E+01,8.637497E+01,8.468121E+01,8.302067E+01,8.139268E+01,7.979662E+01,7.823186E+01,
7.669778E+01,7.519378E+01,7.371928E+01,7.227369E+01,7.085644E+01,6.946699E+01,6.810479E+01,
6.676929E+01,6.545999E+01,6.417636E+01,6.291790E+01,6.168412E+01,6.047453E+01,5.928866E+01,
5.812605E+01,5.698623E+01,5.586876E+01,5.477321E+01,5.369914E+01,5.264614E+01,5.161378E+01,
5.060166E+01,4.960939E+01,4.863658E+01,4.768285E+01,4.674782E+01,4.583112E+01,4.493240E+01,
4.405131E+01,4.318749E+01,4.234061E+01,4.151033E+01,4.069634E+01,3.989831E+01,3.911593E+01,
3.834889E+01,3.759689E+01,3.685964E+01,3.613684E+01,3.542822E+01,3.473350E+01,3.405239E+01,
3.338465E+01,3.272999E+01,3.208818E+01,3.145895E+01,3.084206E+01,3.023726E+01,2.964433E+01,
2.906302E+01,2.849312E+01,2.793438E+01,2.738661E+01,2.684957E+01,2.632307E+01,2.580689E+01,
2.530083E+01,2.480470E+01,2.431829E+01,2.384143E+01,2.337391E+01,2.291556E+01,2.246620E+01,
2.202565E+01,2.159374E+01,2.117030E+01,2.075517E+01,2.034817E+01,1.994916E+01,1.955796E+01,
1.917444E+01,1.879845E+01,1.842982E+01,1.806842E+01,1.771411E+01,1.736675E+01,1.702620E+01,
1.669232E+01,1.636500E+01,1.604409E+01,1.572947E+01,1.542103E+01,1.511863E+01,1.482217E+01,
1.453151E+01,1.424656E+01,1.396719E+01,1.369330E+01,1.342479E+01,1.316153E+01,1.290344E+01,
1.265042E+01,1.240235E+01,1.215915E+01,1.192071E+01,1.168695E+01,1.145778E+01,1.123310E+01,
1.101283E+01,1.079687E+01,1.058515E+01,1.037758E+01,1.017409E+01,9.974578E+00,9.778982E+00,
9.587222E+00,9.399223E+00,9.214910E+00,9.034211E+00,8.857056E+00,8.683374E+00,8.513098E+00,
8.346162E+00,8.182499E+00,8.022045E+00,7.864737E+00,7.710515E+00,7.559316E+00,7.411083E+00,
7.265756E+00,7.123279E+00,6.983596E+00,6.846652E+00,6.712393E+00,6.580767E+00,6.451722E+00,
6.325208E+00,6.201174E+00,6.079573E+00,5.960356E+00,5.843477E+00,5.728890E+00,5.616550E+00,
5.506413E+00,5.398436E+00,5.292576E+00,5.188792E+00,5.087043E+00,4.987289E+00,4.889491E+00,
4.793611E+00,4.699611E+00,4.607455E+00,4.517105E+00,4.428528E+00,4.341687E+00,4.256549E+00,
4.173081E+00,4.091249E+00,4.011022E+00,3.932369E+00,3.855257E+00,3.779658E+00,3.705541E+00,
3.632878E+00,3.561639E+00,3.491798E+00,3.423326E+00,3.356196E+00,3.290383E+00,3.225861E+00,
3.162604E+00,3.100587E+00,3.039787E+00,2.980178E+00,2.921739E+00,2.864445E+00,2.808275E+00,
2.753207E+00,2.699218E+00,2.646288E+00,2.594396E+00,2.543521E+00,2.493644E+00,2.444746E+00,
2.396806E+00,2.349806E+00,2.303727E+00,2.258553E+00,2.214264E+00,2.170844E+00,2.128275E+00,
2.086540E+00,2.045625E+00,2.005511E+00,1.966184E+00,1.927629E+00,1.889829E+00,1.852771E+00,
1.816439E+00,1.780820E+00,1.745899E+00,1.711663E+00,1.678098E+00,1.645192E+00,1.612931E+00,
1.581302E+00,1.550294E+00,1.519893E+00,1.490089E+00,1.460869E+00,1.432223E+00,1.404138E+00,
1.376603E+00,1.349609E+00]], dtype='float')
for i in range(3):
result[i] = ted_empty.daily_plant_dew_timeseries(i, blp_conc[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_soil_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in soil
:param i; simulation number/index
:param pore_h2o_conc; daily values of pesticide concentration in soil pore water
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
        # note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
        # association; rather, it is one year from the day of the 1st pesticide application
:return:
"""
        # create empty pandas dataframes needed to create an empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([[]], dtype='float')
expected_results = [[3.521818E+00,3.285972E+00,3.065920E+00,2.860605E+00,2.669039E+00,2.490301E+00,2.323533E+00,
5.689751E+00,5.308725E+00,4.953216E+00,4.621514E+00,4.312025E+00,4.023261E+00,3.753835E+00,
7.024270E+00,6.553876E+00,6.114982E+00,5.705480E+00,5.323401E+00,4.966909E+00,4.634290E+00,
7.845763E+00,7.320356E+00,6.830133E+00,6.372740E+00,5.945976E+00,5.547792E+00,5.176273E+00,
8.351451E+00,7.792179E+00,7.270360E+00,6.783486E+00,6.329216E+00,5.905368E+00,5.509903E+00,
8.662739E+00,8.082621E+00,7.541352E+00,7.036330E+00,6.565128E+00,6.125481E+00,5.715276E+00,
8.854359E+00,8.261409E+00,7.708167E+00,7.191974E+00,6.710349E+00,6.260977E+00,5.841698E+00,
5.450497E+00,5.085494E+00,4.744933E+00,4.427179E+00,4.130704E+00,3.854083E+00,3.595987E+00,
3.355175E+00,3.130489E+00,2.920849E+00,2.725249E+00,2.542747E+00,2.372467E+00,2.213590E+00,
2.065352E+00,1.927042E+00,1.797994E+00,1.677587E+00,1.565244E+00,1.460425E+00,1.362624E+00,
1.271373E+00,1.186233E+00,1.106795E+00,1.032676E+00,9.635209E-01,8.989968E-01,8.387936E-01,
7.826221E-01,7.302123E-01,6.813121E-01,6.356867E-01,5.931167E-01,5.533974E-01,5.163381E-01,
4.817604E-01,4.494984E-01,4.193968E-01,3.913111E-01,3.651061E-01,3.406561E-01,3.178434E-01,
2.965583E-01,2.766987E-01,2.581690E-01,2.408802E-01,2.247492E-01,2.096984E-01,1.956555E-01,
1.825531E-01,1.703280E-01,1.589217E-01,1.482792E-01,1.383494E-01,1.290845E-01,1.204401E-01,
1.123746E-01,1.048492E-01,9.782777E-02,9.127653E-02,8.516402E-02,7.946084E-02,7.413958E-02,
6.917468E-02,6.454226E-02,6.022005E-02,5.618730E-02,5.242460E-02,4.891388E-02,4.563827E-02,
4.258201E-02,3.973042E-02,3.706979E-02,3.458734E-02,3.227113E-02,3.011003E-02,2.809365E-02,
2.621230E-02,2.445694E-02,2.281913E-02,2.129100E-02,1.986521E-02,1.853490E-02,1.729367E-02,
1.613556E-02,1.505501E-02,1.404682E-02,1.310615E-02,1.222847E-02,1.140957E-02,1.064550E-02,
9.932605E-03,9.267448E-03,8.646835E-03,8.067782E-03,7.527507E-03,7.023412E-03,6.553075E-03,
6.114235E-03,5.704783E-03,5.322751E-03,4.966302E-03,4.633724E-03,4.323417E-03,4.033891E-03,
3.763753E-03,3.511706E-03,3.276538E-03,3.057118E-03,2.852392E-03,2.661376E-03,2.483151E-03,
2.316862E-03,2.161709E-03,2.016946E-03,1.881877E-03,1.755853E-03,1.638269E-03,1.528559E-03,
1.426196E-03,1.330688E-03,1.241576E-03,1.158431E-03,1.080854E-03,1.008473E-03,9.409384E-04,
8.779265E-04,8.191344E-04,7.642794E-04,7.130979E-04,6.653439E-04,6.207878E-04,5.792155E-04,
5.404272E-04,5.042364E-04,4.704692E-04,4.389633E-04,4.095672E-04,3.821397E-04,3.565490E-04,
3.326719E-04,3.103939E-04,2.896077E-04,2.702136E-04,2.521182E-04,2.352346E-04,2.194816E-04,
2.047836E-04,1.910699E-04,1.782745E-04,1.663360E-04,1.551970E-04,1.448039E-04,1.351068E-04,
1.260591E-04,1.176173E-04,1.097408E-04,1.023918E-04,9.553493E-05,8.913724E-05,8.316799E-05,
7.759848E-05,7.240194E-05,6.755340E-05,6.302955E-05,5.880865E-05,5.487041E-05,5.119590E-05,
4.776746E-05,4.456862E-05,4.158399E-05,3.879924E-05,3.620097E-05,3.377670E-05,3.151477E-05,
2.940432E-05,2.743520E-05,2.559795E-05,2.388373E-05,2.228431E-05,2.079200E-05,1.939962E-05,
1.810048E-05,1.688835E-05,1.575739E-05,1.470216E-05,1.371760E-05,1.279898E-05,1.194187E-05,
1.114216E-05,1.039600E-05,9.699809E-06,9.050242E-06,8.444175E-06,7.878693E-06,7.351081E-06,
6.858801E-06,6.399488E-06,5.970933E-06,5.571078E-06,5.197999E-06,4.849905E-06,4.525121E-06,
4.222087E-06,3.939347E-06,3.675540E-06,3.429400E-06,3.199744E-06,2.985467E-06,2.785539E-06,
2.599000E-06,2.424952E-06,2.262561E-06,2.111044E-06,1.969673E-06,1.837770E-06,1.714700E-06,
1.599872E-06,1.492733E-06,1.392769E-06,1.299500E-06,1.212476E-06,1.131280E-06,1.055522E-06,
9.848367E-07,9.188851E-07,8.573501E-07,7.999360E-07,7.463666E-07,6.963847E-07,6.497499E-07,
6.062381E-07,5.656401E-07,5.277609E-07,4.924183E-07,4.594426E-07,4.286751E-07,3.999680E-07,
3.731833E-07,3.481923E-07,3.248749E-07,3.031190E-07,2.828201E-07,2.638805E-07,2.462092E-07,
2.297213E-07,2.143375E-07,1.999840E-07,1.865917E-07,1.740962E-07,1.624375E-07,1.515595E-07,
1.414100E-07,1.319402E-07,1.231046E-07,1.148606E-07,1.071688E-07,9.999199E-08,9.329583E-08,
8.704809E-08,8.121874E-08,7.577976E-08,7.070502E-08,6.597011E-08,6.155229E-08,5.743032E-08,
5.358438E-08,4.999600E-08,4.664791E-08,4.352404E-08,4.060937E-08,3.788988E-08,3.535251E-08,
3.298506E-08,3.077615E-08,2.871516E-08,2.679219E-08,2.499800E-08,2.332396E-08,2.176202E-08,
2.030468E-08,1.894494E-08,1.767625E-08,1.649253E-08,1.538807E-08,1.435758E-08,1.339610E-08,
1.249900E-08,1.166198E-08,1.088101E-08,1.015234E-08,9.472470E-09,8.838127E-09,8.246264E-09,
7.694037E-09,7.178790E-09,6.698048E-09,6.249500E-09,5.830989E-09,5.440505E-09,5.076171E-09,
4.736235E-09,4.419064E-09,4.123132E-09,3.847018E-09,3.589395E-09,3.349024E-09,3.124750E-09,
2.915495E-09,2.720253E-09,2.538086E-09,2.368118E-09,2.209532E-09,2.061566E-09,1.923509E-09,
1.794697E-09,1.674512E-09],
[3.544172E+00,3.306830E+00,3.085381E+00,2.878762E+00,2.685980E+00,2.506108E+00,2.338282E+00,
5.725866E+00,5.342422E+00,4.984656E+00,4.650848E+00,4.339395E+00,4.048799E+00,3.777663E+00,
7.068856E+00,6.595476E+00,6.153797E+00,5.741695E+00,5.357191E+00,4.998436E+00,4.663706E+00,
7.895563E+00,7.366821E+00,6.873487E+00,6.413190E+00,5.983718E+00,5.583006E+00,5.209129E+00,
8.404462E+00,7.841640E+00,7.316509E+00,6.826544E+00,6.369391E+00,5.942852E+00,5.544877E+00,
8.717725E+00,8.133925E+00,7.589220E+00,7.080993E+00,6.606800E+00,6.164363E+00,5.751554E+00,
8.910561E+00,8.313848E+00,7.757094E+00,7.237625E+00,6.752943E+00,6.300718E+00,5.878778E+00,
5.485094E+00,5.117774E+00,4.775052E+00,4.455281E+00,4.156924E+00,3.878547E+00,3.618812E+00,
3.376471E+00,3.150359E+00,2.939389E+00,2.742547E+00,2.558887E+00,2.387526E+00,2.227640E+00,
2.078462E+00,1.939274E+00,1.809406E+00,1.688236E+00,1.575180E+00,1.469695E+00,1.371273E+00,
1.279443E+00,1.193763E+00,1.113820E+00,1.039231E+00,9.696368E-01,9.047031E-01,8.441178E-01,
7.875898E-01,7.348473E-01,6.856367E-01,6.397217E-01,5.968815E-01,5.569101E-01,5.196155E-01,
4.848184E-01,4.523516E-01,4.220589E-01,3.937949E-01,3.674236E-01,3.428184E-01,3.198608E-01,
2.984407E-01,2.784550E-01,2.598077E-01,2.424092E-01,2.261758E-01,2.110295E-01,1.968974E-01,
1.837118E-01,1.714092E-01,1.599304E-01,1.492204E-01,1.392275E-01,1.299039E-01,1.212046E-01,
1.130879E-01,1.055147E-01,9.844872E-02,9.185591E-02,8.570459E-02,7.996521E-02,7.461018E-02,
6.961376E-02,6.495194E-02,6.060230E-02,5.654394E-02,5.275737E-02,4.922436E-02,4.592795E-02,
4.285230E-02,3.998261E-02,3.730509E-02,3.480688E-02,3.247597E-02,3.030115E-02,2.827197E-02,
2.637868E-02,2.461218E-02,2.296398E-02,2.142615E-02,1.999130E-02,1.865255E-02,1.740344E-02,
1.623798E-02,1.515057E-02,1.413599E-02,1.318934E-02,1.230609E-02,1.148199E-02,1.071307E-02,
9.995652E-03,9.326273E-03,8.701720E-03,8.118992E-03,7.575287E-03,7.067993E-03,6.594671E-03,
6.153045E-03,5.740994E-03,5.356537E-03,4.997826E-03,4.663136E-03,4.350860E-03,4.059496E-03,
3.787644E-03,3.533997E-03,3.297335E-03,3.076523E-03,2.870497E-03,2.678269E-03,2.498913E-03,
2.331568E-03,2.175430E-03,2.029748E-03,1.893822E-03,1.766998E-03,1.648668E-03,1.538261E-03,
1.435249E-03,1.339134E-03,1.249456E-03,1.165784E-03,1.087715E-03,1.014874E-03,9.469109E-04,
8.834991E-04,8.243338E-04,7.691307E-04,7.176243E-04,6.695671E-04,6.247282E-04,5.828920E-04,
5.438575E-04,5.074370E-04,4.734555E-04,4.417496E-04,4.121669E-04,3.845653E-04,3.588121E-04,
3.347836E-04,3.123641E-04,2.914460E-04,2.719288E-04,2.537185E-04,2.367277E-04,2.208748E-04,
2.060835E-04,1.922827E-04,1.794061E-04,1.673918E-04,1.561821E-04,1.457230E-04,1.359644E-04,
1.268592E-04,1.183639E-04,1.104374E-04,1.030417E-04,9.614133E-05,8.970304E-05,8.369589E-05,
7.809103E-05,7.286151E-05,6.798219E-05,6.342962E-05,5.918193E-05,5.521870E-05,5.152086E-05,
4.807067E-05,4.485152E-05,4.184795E-05,3.904551E-05,3.643075E-05,3.399109E-05,3.171481E-05,
2.959097E-05,2.760935E-05,2.576043E-05,2.403533E-05,2.242576E-05,2.092397E-05,1.952276E-05,
1.821538E-05,1.699555E-05,1.585741E-05,1.479548E-05,1.380467E-05,1.288022E-05,1.201767E-05,
1.121288E-05,1.046199E-05,9.761378E-06,9.107688E-06,8.497774E-06,7.928703E-06,7.397742E-06,
6.902337E-06,6.440108E-06,6.008833E-06,5.606440E-06,5.230993E-06,4.880689E-06,4.553844E-06,
4.248887E-06,3.964352E-06,3.698871E-06,3.451168E-06,3.220054E-06,3.004417E-06,2.803220E-06,
2.615497E-06,2.440345E-06,2.276922E-06,2.124443E-06,1.982176E-06,1.849435E-06,1.725584E-06,
1.610027E-06,1.502208E-06,1.401610E-06,1.307748E-06,1.220172E-06,1.138461E-06,1.062222E-06,
9.910879E-07,9.247177E-07,8.627921E-07,8.050135E-07,7.511042E-07,7.008050E-07,6.538742E-07,
6.100862E-07,5.692305E-07,5.311108E-07,4.955439E-07,4.623588E-07,4.313961E-07,4.025068E-07,
3.755521E-07,3.504025E-07,3.269371E-07,3.050431E-07,2.846153E-07,2.655554E-07,2.477720E-07,
2.311794E-07,2.156980E-07,2.012534E-07,1.877760E-07,1.752012E-07,1.634685E-07,1.525215E-07,
1.423076E-07,1.327777E-07,1.238860E-07,1.155897E-07,1.078490E-07,1.006267E-07,9.388802E-08,
8.760062E-08,8.173427E-08,7.626077E-08,7.115381E-08,6.638886E-08,6.194299E-08,5.779486E-08,
5.392451E-08,5.031334E-08,4.694401E-08,4.380031E-08,4.086713E-08,3.813038E-08,3.557691E-08,
3.319443E-08,3.097150E-08,2.889743E-08,2.696225E-08,2.515667E-08,2.347201E-08,2.190016E-08,
2.043357E-08,1.906519E-08,1.778845E-08,1.659721E-08,1.548575E-08,1.444871E-08,1.348113E-08,
1.257834E-08,1.173600E-08,1.095008E-08,1.021678E-08,9.532596E-09,8.894227E-09,8.298607E-09,
7.742874E-09,7.224357E-09,6.740563E-09,6.289168E-09,5.868001E-09,5.475039E-09,5.108392E-09,
4.766298E-09,4.447113E-09,4.149303E-09,3.871437E-09,3.612178E-09,3.370282E-09,3.144584E-09,
2.934001E-09,2.737519E-09,2.554196E-09,2.383149E-09,2.223557E-09,2.074652E-09,1.935719E-09,
1.806089E-09,1.685141E-09],
[3.555456E+00,3.317358E+00,3.095204E+00,2.887928E+00,2.694532E+00,2.514087E+00,2.345726E+00,
5.744096E+00,5.359431E+00,5.000526E+00,4.665656E+00,4.353211E+00,4.061689E+00,3.789690E+00,
7.091362E+00,6.616475E+00,6.173389E+00,5.759976E+00,5.374248E+00,5.014350E+00,4.678554E+00,
7.920702E+00,7.390276E+00,6.895371E+00,6.433609E+00,6.002769E+00,5.600782E+00,5.225714E+00,
8.431220E+00,7.866606E+00,7.339803E+00,6.848279E+00,6.389670E+00,5.961773E+00,5.562531E+00,
8.745481E+00,8.159822E+00,7.613383E+00,7.103538E+00,6.627835E+00,6.183989E+00,5.769866E+00,
8.938931E+00,8.340318E+00,7.781792E+00,7.260668E+00,6.774443E+00,6.320779E+00,5.897495E+00,
5.502558E+00,5.134068E+00,4.790255E+00,4.469466E+00,4.170159E+00,3.890896E+00,3.630334E+00,
3.387221E+00,3.160389E+00,2.948748E+00,2.751279E+00,2.567034E+00,2.395127E+00,2.234733E+00,
2.085079E+00,1.945448E+00,1.815167E+00,1.693611E+00,1.580195E+00,1.474374E+00,1.375639E+00,
1.283517E+00,1.197564E+00,1.117366E+00,1.042540E+00,9.727239E-01,9.075835E-01,8.468054E-01,
7.900974E-01,7.371869E-01,6.878197E-01,6.417585E-01,5.987818E-01,5.586832E-01,5.212699E-01,
4.863620E-01,4.537918E-01,4.234027E-01,3.950487E-01,3.685934E-01,3.439098E-01,3.208792E-01,
2.993909E-01,2.793416E-01,2.606349E-01,2.431810E-01,2.268959E-01,2.117013E-01,1.975243E-01,
1.842967E-01,1.719549E-01,1.604396E-01,1.496955E-01,1.396708E-01,1.303175E-01,1.215905E-01,
1.134479E-01,1.058507E-01,9.876217E-02,9.214836E-02,8.597746E-02,8.021981E-02,7.484773E-02,
6.983540E-02,6.515873E-02,6.079525E-02,5.672397E-02,5.292534E-02,4.938108E-02,4.607418E-02,
4.298873E-02,4.010990E-02,3.742386E-02,3.491770E-02,3.257937E-02,3.039762E-02,2.836199E-02,
2.646267E-02,2.469054E-02,2.303709E-02,2.149437E-02,2.005495E-02,1.871193E-02,1.745885E-02,
1.628968E-02,1.519881E-02,1.418099E-02,1.323133E-02,1.234527E-02,1.151855E-02,1.074718E-02,
1.002748E-02,9.355966E-03,8.729425E-03,8.144841E-03,7.599406E-03,7.090496E-03,6.615667E-03,
6.172636E-03,5.759273E-03,5.373591E-03,5.013738E-03,4.677983E-03,4.364712E-03,4.072421E-03,
3.799703E-03,3.545248E-03,3.307833E-03,3.086318E-03,2.879636E-03,2.686796E-03,2.506869E-03,
2.338991E-03,2.182356E-03,2.036210E-03,1.899851E-03,1.772624E-03,1.653917E-03,1.543159E-03,
1.439818E-03,1.343398E-03,1.253435E-03,1.169496E-03,1.091178E-03,1.018105E-03,9.499257E-04,
8.863120E-04,8.269584E-04,7.715794E-04,7.199091E-04,6.716989E-04,6.267173E-04,5.847479E-04,
5.455891E-04,5.090526E-04,4.749629E-04,4.431560E-04,4.134792E-04,3.857897E-04,3.599545E-04,
3.358495E-04,3.133586E-04,2.923739E-04,2.727945E-04,2.545263E-04,2.374814E-04,2.215780E-04,
2.067396E-04,1.928949E-04,1.799773E-04,1.679247E-04,1.566793E-04,1.461870E-04,1.363973E-04,
1.272631E-04,1.187407E-04,1.107890E-04,1.033698E-04,9.644743E-05,8.998863E-05,8.396236E-05,
7.833966E-05,7.309348E-05,6.819863E-05,6.363157E-05,5.937036E-05,5.539450E-05,5.168490E-05,
4.822372E-05,4.499432E-05,4.198118E-05,3.916983E-05,3.654674E-05,3.409932E-05,3.181579E-05,
2.968518E-05,2.769725E-05,2.584245E-05,2.411186E-05,2.249716E-05,2.099059E-05,1.958491E-05,
1.827337E-05,1.704966E-05,1.590789E-05,1.484259E-05,1.384863E-05,1.292122E-05,1.205593E-05,
1.124858E-05,1.049530E-05,9.792457E-06,9.136686E-06,8.524829E-06,7.953947E-06,7.421295E-06,
6.924313E-06,6.460612E-06,6.027964E-06,5.624290E-06,5.247648E-06,4.896229E-06,4.568343E-06,
4.262415E-06,3.976973E-06,3.710647E-06,3.462156E-06,3.230306E-06,3.013982E-06,2.812145E-06,
2.623824E-06,2.448114E-06,2.284171E-06,2.131207E-06,1.988487E-06,1.855324E-06,1.731078E-06,
1.615153E-06,1.506991E-06,1.406072E-06,1.311912E-06,1.224057E-06,1.142086E-06,1.065604E-06,
9.942433E-07,9.276618E-07,8.655391E-07,8.075765E-07,7.534956E-07,7.030362E-07,6.559560E-07,
6.120286E-07,5.710428E-07,5.328018E-07,4.971217E-07,4.638309E-07,4.327695E-07,4.037883E-07,
3.767478E-07,3.515181E-07,3.279780E-07,3.060143E-07,2.855214E-07,2.664009E-07,2.485608E-07,
2.319155E-07,2.163848E-07,2.018941E-07,1.883739E-07,1.757591E-07,1.639890E-07,1.530071E-07,
1.427607E-07,1.332005E-07,1.242804E-07,1.159577E-07,1.081924E-07,1.009471E-07,9.418694E-08,
8.787953E-08,8.199450E-08,7.650357E-08,7.138036E-08,6.660023E-08,6.214021E-08,5.797886E-08,
5.409619E-08,5.047353E-08,4.709347E-08,4.393976E-08,4.099725E-08,3.825179E-08,3.569018E-08,
3.330011E-08,3.107010E-08,2.898943E-08,2.704810E-08,2.523677E-08,2.354674E-08,2.196988E-08,
2.049862E-08,1.912589E-08,1.784509E-08,1.665006E-08,1.553505E-08,1.449472E-08,1.352405E-08,
1.261838E-08,1.177337E-08,1.098494E-08,1.024931E-08,9.562946E-09,8.922544E-09,8.325028E-09,
7.767526E-09,7.247358E-09,6.762024E-09,6.309192E-09,5.886684E-09,5.492470E-09,5.124656E-09,
4.781473E-09,4.461272E-09,4.162514E-09,3.883763E-09,3.623679E-09,3.381012E-09,3.154596E-09,
2.943342E-09,2.746235E-09,2.562328E-09,2.390737E-09,2.230636E-09,2.081257E-09,1.941882E-09,
1.811840E-09,1.690506E-09]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.soil_foc = 0.015
# input variables that change per simulation
ted_empty.koc = pd.Series([1000., 1500., 2000.], dtype='float')
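# Sanity-check sketch (an assumption inferred from the fixture values, not the model source):
# the expected soil concentrations above are consistent with a simple partitioning relation,
#   conc_soil = pore_h2o_conc * koc * soil_foc
# e.g. with koc = 1500. and soil_foc = 0.015, the second simulation's first pore-water value
# 1.575188E-01 gives 1.575188E-01 * 1500. * 0.015 = 3.544173E+00, matching the expected
# value 3.544172E+00 to rounding.  daily_soil_timeseries may differ in implementation detail.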
# internally calculated variables
pore_h2o_conc = pd.Series([[2.347878E-01,2.190648E-01,2.043947E-01,1.907070E-01,1.779359E-01,1.660201E-01,
1.549022E-01,3.793167E-01,3.539150E-01,3.302144E-01,3.081009E-01,2.874683E-01,
2.682174E-01,2.502557E-01,4.682847E-01,4.369250E-01,4.076655E-01,3.803653E-01,
3.548934E-01,3.311273E-01,3.089527E-01,5.230509E-01,4.880237E-01,4.553422E-01,
4.248493E-01,3.963984E-01,3.698528E-01,3.450849E-01,5.567634E-01,5.194786E-01,
4.846907E-01,4.522324E-01,4.219478E-01,3.936912E-01,3.673269E-01,5.775159E-01,
5.388414E-01,5.027568E-01,4.690887E-01,4.376752E-01,4.083654E-01,3.810184E-01,
5.902906E-01,5.507606E-01,5.138778E-01,4.794649E-01,4.473566E-01,4.173985E-01,
3.894465E-01,3.633665E-01,3.390329E-01,3.163289E-01,2.951453E-01,2.753803E-01,
2.569389E-01,2.397325E-01,2.236783E-01,2.086992E-01,1.947233E-01,1.816832E-01,
1.695165E-01,1.581644E-01,1.475726E-01,1.376901E-01,1.284694E-01,1.198662E-01,
1.118392E-01,1.043496E-01,9.736164E-02,9.084162E-02,8.475823E-02,7.908222E-02,
7.378632E-02,6.884507E-02,6.423472E-02,5.993312E-02,5.591958E-02,5.217481E-02,
4.868082E-02,4.542081E-02,4.237911E-02,3.954111E-02,3.689316E-02,3.442254E-02,
3.211736E-02,2.996656E-02,2.795979E-02,2.608740E-02,2.434041E-02,2.271040E-02,
2.118956E-02,1.977056E-02,1.844658E-02,1.721127E-02,1.605868E-02,1.498328E-02,
1.397989E-02,1.304370E-02,1.217020E-02,1.135520E-02,1.059478E-02,9.885278E-03,
9.223290E-03,8.605634E-03,8.029341E-03,7.491640E-03,6.989947E-03,6.521851E-03,
6.085102E-03,5.677601E-03,5.297389E-03,4.942639E-03,4.611645E-03,4.302817E-03,
4.014670E-03,3.745820E-03,3.494973E-03,3.260926E-03,3.042551E-03,2.838801E-03,
2.648695E-03,2.471319E-03,2.305823E-03,2.151409E-03,2.007335E-03,1.872910E-03,
1.747487E-03,1.630463E-03,1.521276E-03,1.419400E-03,1.324347E-03,1.235660E-03,
1.152911E-03,1.075704E-03,1.003668E-03,9.364550E-04,8.737434E-04,8.152314E-04,
7.606378E-04,7.097001E-04,6.621737E-04,6.178299E-04,5.764556E-04,5.378521E-04,
5.018338E-04,4.682275E-04,4.368717E-04,4.076157E-04,3.803189E-04,3.548501E-04,
3.310868E-04,3.089149E-04,2.882278E-04,2.689261E-04,2.509169E-04,2.341137E-04,
2.184358E-04,2.038078E-04,1.901594E-04,1.774250E-04,1.655434E-04,1.544575E-04,
1.441139E-04,1.344630E-04,1.254584E-04,1.170569E-04,1.092179E-04,1.019039E-04,
9.507972E-05,8.871252E-05,8.277171E-05,7.722873E-05,7.205696E-05,6.723152E-05,
6.272922E-05,5.852844E-05,5.460896E-05,5.095196E-05,4.753986E-05,4.435626E-05,
4.138585E-05,3.861437E-05,3.602848E-05,3.361576E-05,3.136461E-05,2.926422E-05,
2.730448E-05,2.547598E-05,2.376993E-05,2.217813E-05,2.069293E-05,1.930718E-05,
1.801424E-05,1.680788E-05,1.568231E-05,1.463211E-05,1.365224E-05,1.273799E-05,
1.188497E-05,1.108906E-05,1.034646E-05,9.653592E-06,9.007119E-06,8.403940E-06,
7.841153E-06,7.316054E-06,6.826120E-06,6.368995E-06,5.942483E-06,5.544532E-06,
5.173232E-06,4.826796E-06,4.503560E-06,4.201970E-06,3.920576E-06,3.658027E-06,
3.413060E-06,3.184498E-06,2.971241E-06,2.772266E-06,2.586616E-06,2.413398E-06,
2.251780E-06,2.100985E-06,1.960288E-06,1.829014E-06,1.706530E-06,1.592249E-06,
1.485621E-06,1.386133E-06,1.293308E-06,1.206699E-06,1.125890E-06,1.050492E-06,
9.801441E-07,9.145068E-07,8.532650E-07,7.961244E-07,7.428103E-07,6.930666E-07,
6.466540E-07,6.033495E-07,5.629450E-07,5.252462E-07,4.900721E-07,4.572534E-07,
4.266325E-07,3.980622E-07,3.714052E-07,3.465333E-07,3.233270E-07,3.016747E-07,
2.814725E-07,2.626231E-07,2.450360E-07,2.286267E-07,2.133163E-07,1.990311E-07,
1.857026E-07,1.732666E-07,1.616635E-07,1.508374E-07,1.407362E-07,1.313116E-07,
1.225180E-07,1.143133E-07,1.066581E-07,9.951555E-08,9.285129E-08,8.663332E-08,
8.083174E-08,7.541868E-08,7.036812E-08,6.565578E-08,6.125901E-08,5.715667E-08,
5.332906E-08,4.975778E-08,4.642565E-08,4.331666E-08,4.041587E-08,3.770934E-08,
3.518406E-08,3.282789E-08,3.062950E-08,2.857834E-08,2.666453E-08,2.487889E-08,
2.321282E-08,2.165833E-08,2.020794E-08,1.885467E-08,1.759203E-08,1.641394E-08,
1.531475E-08,1.428917E-08,1.333227E-08,1.243944E-08,1.160641E-08,1.082916E-08,
1.010397E-08,9.427336E-09,8.796015E-09,8.206972E-09,7.657376E-09,7.144584E-09,
6.666133E-09,6.219722E-09,5.803206E-09,5.414582E-09,5.051984E-09,4.713668E-09,
4.398008E-09,4.103486E-09,3.828688E-09,3.572292E-09,3.333066E-09,3.109861E-09,
2.901603E-09,2.707291E-09,2.525992E-09,2.356834E-09,2.199004E-09,2.051743E-09,
1.914344E-09,1.786146E-09,1.666533E-09,1.554930E-09,1.450801E-09,1.353646E-09,
1.262996E-09,1.178417E-09,1.099502E-09,1.025872E-09,9.571720E-10,8.930730E-10,
8.332666E-10,7.774652E-10,7.254007E-10,6.768228E-10,6.314980E-10,5.892085E-10,
5.497509E-10,5.129358E-10,4.785860E-10,4.465365E-10,4.166333E-10,3.887326E-10,
3.627004E-10,3.384114E-10,3.157490E-10,2.946042E-10,2.748755E-10,2.564679E-10,
2.392930E-10,2.232683E-10,2.083167E-10,1.943663E-10,1.813502E-10,1.692057E-10,
1.578745E-10,1.473021E-10,1.374377E-10,1.282339E-10,1.196465E-10,1.116341E-10],
[1.575188E-01,1.469702E-01,1.371280E-01,1.279450E-01,1.193769E-01,1.113826E-01,
1.039236E-01,2.544829E-01,2.374410E-01,2.215403E-01,2.067044E-01,1.928620E-01,
1.799466E-01,1.678961E-01,3.141714E-01,2.931323E-01,2.735021E-01,2.551865E-01,
2.380974E-01,2.221527E-01,2.072758E-01,3.509139E-01,3.274143E-01,3.054883E-01,
2.850307E-01,2.659430E-01,2.481336E-01,2.315169E-01,3.735316E-01,3.485173E-01,
3.251782E-01,3.034020E-01,2.830840E-01,2.641267E-01,2.464390E-01,3.874544E-01,
3.615078E-01,3.372987E-01,3.147108E-01,2.936356E-01,2.739717E-01,2.556246E-01,
3.960250E-01,3.695043E-01,3.447597E-01,3.216722E-01,3.001308E-01,2.800319E-01,
2.612790E-01,2.437820E-01,2.274566E-01,2.122245E-01,1.980125E-01,1.847522E-01,
1.723799E-01,1.608361E-01,1.500654E-01,1.400160E-01,1.306395E-01,1.218910E-01,
1.137283E-01,1.061123E-01,9.900624E-02,9.237609E-02,8.618994E-02,8.041805E-02,
7.503270E-02,7.000798E-02,6.531976E-02,6.094549E-02,5.686415E-02,5.305613E-02,
4.950312E-02,4.618804E-02,4.309497E-02,4.020903E-02,3.751635E-02,3.500399E-02,
3.265988E-02,3.047274E-02,2.843208E-02,2.652806E-02,2.475156E-02,2.309402E-02,
2.154748E-02,2.010451E-02,1.875817E-02,1.750200E-02,1.632994E-02,1.523637E-02,
1.421604E-02,1.326403E-02,1.237578E-02,1.154701E-02,1.077374E-02,1.005226E-02,
9.379087E-03,8.750998E-03,8.164970E-03,7.618186E-03,7.108019E-03,6.632016E-03,
6.187890E-03,5.773505E-03,5.386871E-03,5.026128E-03,4.689544E-03,4.375499E-03,
4.082485E-03,3.809093E-03,3.554009E-03,3.316008E-03,3.093945E-03,2.886753E-03,
2.693435E-03,2.513064E-03,2.344772E-03,2.187749E-03,2.041242E-03,1.904547E-03,
1.777005E-03,1.658004E-03,1.546972E-03,1.443376E-03,1.346718E-03,1.256532E-03,
1.172386E-03,1.093875E-03,1.020621E-03,9.522733E-04,8.885024E-04,8.290020E-04,
7.734862E-04,7.216882E-04,6.733589E-04,6.282660E-04,5.861929E-04,5.469374E-04,
5.103106E-04,4.761366E-04,4.442512E-04,4.145010E-04,3.867431E-04,3.608441E-04,
3.366794E-04,3.141330E-04,2.930965E-04,2.734687E-04,2.551553E-04,2.380683E-04,
2.221256E-04,2.072505E-04,1.933716E-04,1.804220E-04,1.683397E-04,1.570665E-04,
1.465482E-04,1.367343E-04,1.275777E-04,1.190342E-04,1.110628E-04,1.036253E-04,
9.668578E-05,9.021102E-05,8.416986E-05,7.853326E-05,7.327412E-05,6.836717E-05,
6.378883E-05,5.951708E-05,5.553140E-05,5.181263E-05,4.834289E-05,4.510551E-05,
4.208493E-05,3.926663E-05,3.663706E-05,3.418358E-05,3.189441E-05,2.975854E-05,
2.776570E-05,2.590631E-05,2.417144E-05,2.255276E-05,2.104246E-05,1.963331E-05,
1.831853E-05,1.709179E-05,1.594721E-05,1.487927E-05,1.388285E-05,1.295316E-05,
1.208572E-05,1.127638E-05,1.052123E-05,9.816657E-06,9.159265E-06,8.545896E-06,
7.973603E-06,7.439635E-06,6.941425E-06,6.476578E-06,6.042861E-06,5.638189E-06,
5.260616E-06,4.908328E-06,4.579632E-06,4.272948E-06,3.986802E-06,3.719817E-06,
3.470712E-06,3.238289E-06,3.021431E-06,2.819094E-06,2.630308E-06,2.454164E-06,
2.289816E-06,2.136474E-06,1.993401E-06,1.859909E-06,1.735356E-06,1.619145E-06,
1.510715E-06,1.409547E-06,1.315154E-06,1.227082E-06,1.144908E-06,1.068237E-06,
9.967004E-07,9.299543E-07,8.676781E-07,8.095723E-07,7.553576E-07,7.047736E-07,
6.575770E-07,6.135411E-07,5.724540E-07,5.341185E-07,4.983502E-07,4.649772E-07,
4.338390E-07,4.047861E-07,3.776788E-07,3.523868E-07,3.287885E-07,3.067705E-07,
2.862270E-07,2.670593E-07,2.491751E-07,2.324886E-07,2.169195E-07,2.023931E-07,
1.888394E-07,1.761934E-07,1.643943E-07,1.533853E-07,1.431135E-07,1.335296E-07,
1.245875E-07,1.162443E-07,1.084598E-07,1.011965E-07,9.441971E-08,8.809670E-08,
8.219713E-08,7.669263E-08,7.155676E-08,6.676481E-08,6.229377E-08,5.812215E-08,
5.422988E-08,5.059827E-08,4.720985E-08,4.404835E-08,4.109856E-08,3.834632E-08,
3.577838E-08,3.338241E-08,3.114689E-08,2.906107E-08,2.711494E-08,2.529913E-08,
2.360493E-08,2.202418E-08,2.054928E-08,1.917316E-08,1.788919E-08,1.669120E-08,
1.557344E-08,1.453054E-08,1.355747E-08,1.264957E-08,1.180246E-08,1.101209E-08,
1.027464E-08,9.586579E-09,8.944595E-09,8.345602E-09,7.786722E-09,7.265268E-09,
6.778735E-09,6.324783E-09,5.901232E-09,5.506044E-09,5.137321E-09,4.793290E-09,
4.472297E-09,4.172801E-09,3.893361E-09,3.632634E-09,3.389368E-09,3.162392E-09,
2.950616E-09,2.753022E-09,2.568660E-09,2.396645E-09,2.236149E-09,2.086400E-09,
1.946680E-09,1.816317E-09,1.694684E-09,1.581196E-09,1.475308E-09,1.376511E-09,
1.284330E-09,1.198322E-09,1.118074E-09,1.043200E-09,9.733402E-10,9.081585E-10,
8.473419E-10,7.905979E-10,7.376540E-10,6.882555E-10,6.421651E-10,5.991612E-10,
5.590372E-10,5.216001E-10,4.866701E-10,4.540793E-10,4.236709E-10,3.952990E-10,
3.688270E-10,3.441277E-10,3.210825E-10,2.995806E-10,2.795186E-10,2.608001E-10,
2.433351E-10,2.270396E-10,2.118355E-10,1.976495E-10,1.844135E-10,1.720639E-10,
1.605413E-10,1.497903E-10,1.397593E-10,1.304000E-10,1.216675E-10,1.135198E-10,
1.059177E-10,9.882474E-11,9.220674E-11,8.603193E-11,8.027063E-11,7.489515E-11],
[1.185152E-01,1.105786E-01,1.031735E-01,9.626426E-02,8.981773E-02,8.380291E-02,
7.819088E-02,1.914699E-01,1.786477E-01,1.666842E-01,1.555219E-01,1.451070E-01,
1.353896E-01,1.263230E-01,2.363787E-01,2.205492E-01,2.057796E-01,1.919992E-01,
1.791416E-01,1.671450E-01,1.559518E-01,2.640234E-01,2.463425E-01,2.298457E-01,
2.144536E-01,2.000923E-01,1.866927E-01,1.741905E-01,2.810407E-01,2.622202E-01,
2.446601E-01,2.282760E-01,2.129890E-01,1.987258E-01,1.854177E-01,2.915160E-01,
2.719941E-01,2.537794E-01,2.367846E-01,2.209278E-01,2.061330E-01,1.923289E-01,
2.979644E-01,2.780106E-01,2.593931E-01,2.420223E-01,2.258148E-01,2.106926E-01,
1.965832E-01,1.834186E-01,1.711356E-01,1.596752E-01,1.489822E-01,1.390053E-01,
1.296965E-01,1.210111E-01,1.129074E-01,1.053463E-01,9.829159E-02,9.170929E-02,
8.556780E-02,7.983758E-02,7.449109E-02,6.950265E-02,6.484826E-02,6.050557E-02,
5.645369E-02,5.267316E-02,4.914579E-02,4.585465E-02,4.278390E-02,3.991879E-02,
3.724555E-02,3.475132E-02,3.242413E-02,3.025278E-02,2.822685E-02,2.633658E-02,
2.457290E-02,2.292732E-02,2.139195E-02,1.995939E-02,1.862277E-02,1.737566E-02,
1.621207E-02,1.512639E-02,1.411342E-02,1.316829E-02,1.228645E-02,1.146366E-02,
1.069597E-02,9.979697E-03,9.311387E-03,8.687831E-03,8.106033E-03,7.563196E-03,
7.056711E-03,6.584145E-03,6.143224E-03,5.731831E-03,5.347987E-03,4.989849E-03,
4.655693E-03,4.343915E-03,4.053016E-03,3.781598E-03,3.528356E-03,3.292072E-03,
3.071612E-03,2.865915E-03,2.673994E-03,2.494924E-03,2.327847E-03,2.171958E-03,
2.026508E-03,1.890799E-03,1.764178E-03,1.646036E-03,1.535806E-03,1.432958E-03,
1.336997E-03,1.247462E-03,1.163923E-03,1.085979E-03,1.013254E-03,9.453995E-04,
8.820889E-04,8.230181E-04,7.679030E-04,7.164788E-04,6.684984E-04,6.237311E-04,
5.819617E-04,5.429894E-04,5.066271E-04,4.726998E-04,4.410445E-04,4.115090E-04,
3.839515E-04,3.582394E-04,3.342492E-04,3.118655E-04,2.909808E-04,2.714947E-04,
2.533135E-04,2.363499E-04,2.205222E-04,2.057545E-04,1.919758E-04,1.791197E-04,
1.671246E-04,1.559328E-04,1.454904E-04,1.357474E-04,1.266568E-04,1.181749E-04,
1.102611E-04,1.028773E-04,9.598788E-05,8.955986E-05,8.356230E-05,7.796638E-05,
7.274521E-05,6.787368E-05,6.332838E-05,5.908747E-05,5.513056E-05,5.143863E-05,
4.799394E-05,4.477993E-05,4.178115E-05,3.898319E-05,3.637260E-05,3.393684E-05,
3.166419E-05,2.954373E-05,2.756528E-05,2.571931E-05,2.399697E-05,2.238996E-05,
2.089058E-05,1.949160E-05,1.818630E-05,1.696842E-05,1.583210E-05,1.477187E-05,
1.378264E-05,1.285966E-05,1.199848E-05,1.119498E-05,1.044529E-05,9.745798E-06,
9.093151E-06,8.484210E-06,7.916048E-06,7.385934E-06,6.891320E-06,6.429829E-06,
5.999242E-06,5.597491E-06,5.222644E-06,4.872899E-06,4.546575E-06,4.242105E-06,
3.958024E-06,3.692967E-06,3.445660E-06,3.214914E-06,2.999621E-06,2.798745E-06,
2.611322E-06,2.436449E-06,2.273288E-06,2.121052E-06,1.979012E-06,1.846483E-06,
1.722830E-06,1.607457E-06,1.499811E-06,1.399373E-06,1.305661E-06,1.218225E-06,
1.136644E-06,1.060526E-06,9.895060E-07,9.232417E-07,8.614150E-07,8.037286E-07,
7.499053E-07,6.996864E-07,6.528305E-07,6.091124E-07,5.683219E-07,5.302631E-07,
4.947530E-07,4.616209E-07,4.307075E-07,4.018643E-07,3.749526E-07,3.498432E-07,
3.264152E-07,3.045562E-07,2.841610E-07,2.651316E-07,2.473765E-07,2.308104E-07,
2.153537E-07,2.009321E-07,1.874763E-07,1.749216E-07,1.632076E-07,1.522781E-07,
1.420805E-07,1.325658E-07,1.236882E-07,1.154052E-07,1.076769E-07,1.004661E-07,
9.373816E-08,8.746080E-08,8.160381E-08,7.613905E-08,7.104024E-08,6.628289E-08,
6.184412E-08,5.770261E-08,5.383844E-08,5.023304E-08,4.686908E-08,4.373040E-08,
4.080190E-08,3.806952E-08,3.552012E-08,3.314144E-08,3.092206E-08,2.885130E-08,
2.691922E-08,2.511652E-08,2.343454E-08,2.186520E-08,2.040095E-08,1.903476E-08,
1.776006E-08,1.657072E-08,1.546103E-08,1.442565E-08,1.345961E-08,1.255826E-08,
1.171727E-08,1.093260E-08,1.020048E-08,9.517381E-09,8.880030E-09,8.285361E-09,
7.730515E-09,7.212826E-09,6.729804E-09,6.279130E-09,5.858635E-09,5.466300E-09,
5.100238E-09,4.758690E-09,4.440015E-09,4.142681E-09,3.865258E-09,3.606413E-09,
3.364902E-09,3.139565E-09,2.929318E-09,2.733150E-09,2.550119E-09,2.379345E-09,
2.220008E-09,2.071340E-09,1.932629E-09,1.803206E-09,1.682451E-09,1.569782E-09,
1.464659E-09,1.366575E-09,1.275060E-09,1.189673E-09,1.110004E-09,1.035670E-09,
9.663144E-10,9.016032E-10,8.412256E-10,7.848912E-10,7.323294E-10,6.832875E-10,
6.375298E-10,5.948363E-10,5.550019E-10,5.178351E-10,4.831572E-10,4.508016E-10,
4.206128E-10,3.924456E-10,3.661647E-10,3.416437E-10,3.187649E-10,2.974181E-10,
2.775009E-10,2.589175E-10,2.415786E-10,2.254008E-10,2.103064E-10,1.962228E-10,
1.830823E-10,1.708219E-10,1.593824E-10,1.487091E-10,1.387505E-10,1.294588E-10,
1.207893E-10,1.127004E-10,1.051532E-10,9.811140E-11,9.154117E-11,8.541093E-11,
7.969122E-11,7.435454E-11,6.937524E-11,6.472938E-11,6.039465E-11,5.635020E-11]], dtype='float')
for i in range(3):
result[i] = ted_empty.daily_soil_timeseries(i, pore_h2o_conc[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_soil_inv_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in soil invertebrates (earthworms)
:param i; simulation number/index
:param pore_h2o_conc; daily values of pesticide concentration in soil pore water
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
# association, rather it is one year from the day of 1st pesticide application
# this represents Eq 2 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create an empty TED object (backed by empty pandas dataframes) for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([[]], dtype='float')
expected_results = [[2.347878E+02,2.190648E+02,2.043947E+02,1.907070E+02,1.779359E+02,1.660201E+02,
1.549022E+02,3.793167E+02,3.539150E+02,3.302144E+02,3.081009E+02,2.874683E+02,
2.682174E+02,2.502557E+02,4.682847E+02,4.369250E+02,4.076655E+02,3.803653E+02,
3.548934E+02,3.311273E+02,3.089527E+02,5.230509E+02,4.880237E+02,4.553422E+02,
4.248493E+02,3.963984E+02,3.698528E+02,3.450849E+02,5.567634E+02,5.194786E+02,
4.846907E+02,4.522324E+02,4.219478E+02,3.936912E+02,3.673269E+02,5.775159E+02,
5.388414E+02,5.027568E+02,4.690887E+02,4.376752E+02,4.083654E+02,3.810184E+02,
5.902906E+02,5.507606E+02,5.138778E+02,4.794649E+02,4.473566E+02,4.173985E+02,
3.894465E+02,3.633665E+02,3.390329E+02,3.163289E+02,2.951453E+02,2.753803E+02,
2.569389E+02,2.397325E+02,2.236783E+02,2.086992E+02,1.947233E+02,1.816832E+02,
1.695165E+02,1.581644E+02,1.475726E+02,1.376901E+02,1.284694E+02,1.198662E+02,
1.118392E+02,1.043496E+02,9.736164E+01,9.084162E+01,8.475823E+01,7.908222E+01,
7.378632E+01,6.884507E+01,6.423472E+01,5.993312E+01,5.591958E+01,5.217481E+01,
4.868082E+01,4.542081E+01,4.237911E+01,3.954111E+01,3.689316E+01,3.442254E+01,
3.211736E+01,2.996656E+01,2.795979E+01,2.608740E+01,2.434041E+01,2.271040E+01,
2.118956E+01,1.977056E+01,1.844658E+01,1.721127E+01,1.605868E+01,1.498328E+01,
1.397989E+01,1.304370E+01,1.217020E+01,1.135520E+01,1.059478E+01,9.885278E+00,
9.223290E+00,8.605634E+00,8.029341E+00,7.491640E+00,6.989947E+00,6.521851E+00,
6.085102E+00,5.677601E+00,5.297389E+00,4.942639E+00,4.611645E+00,4.302817E+00,
4.014670E+00,3.745820E+00,3.494973E+00,3.260926E+00,3.042551E+00,2.838801E+00,
2.648695E+00,2.471319E+00,2.305823E+00,2.151409E+00,2.007335E+00,1.872910E+00,
1.747487E+00,1.630463E+00,1.521276E+00,1.419400E+00,1.324347E+00,1.235660E+00,
1.152911E+00,1.075704E+00,1.003668E+00,9.364550E-01,8.737434E-01,8.152314E-01,
7.606378E-01,7.097001E-01,6.621737E-01,6.178299E-01,5.764556E-01,5.378521E-01,
5.018338E-01,4.682275E-01,4.368717E-01,4.076157E-01,3.803189E-01,3.548501E-01,
3.310868E-01,3.089149E-01,2.882278E-01,2.689261E-01,2.509169E-01,2.341137E-01,
2.184358E-01,2.038078E-01,1.901594E-01,1.774250E-01,1.655434E-01,1.544575E-01,
1.441139E-01,1.344630E-01,1.254584E-01,1.170569E-01,1.092179E-01,1.019039E-01,
9.507972E-02,8.871252E-02,8.277171E-02,7.722873E-02,7.205696E-02,6.723152E-02,
6.272922E-02,5.852844E-02,5.460896E-02,5.095196E-02,4.753986E-02,4.435626E-02,
4.138585E-02,3.861437E-02,3.602848E-02,3.361576E-02,3.136461E-02,2.926422E-02,
2.730448E-02,2.547598E-02,2.376993E-02,2.217813E-02,2.069293E-02,1.930718E-02,
1.801424E-02,1.680788E-02,1.568231E-02,1.463211E-02,1.365224E-02,1.273799E-02,
1.188497E-02,1.108906E-02,1.034646E-02,9.653592E-03,9.007119E-03,8.403940E-03,
7.841153E-03,7.316054E-03,6.826120E-03,6.368995E-03,5.942483E-03,5.544532E-03,
5.173232E-03,4.826796E-03,4.503560E-03,4.201970E-03,3.920576E-03,3.658027E-03,
3.413060E-03,3.184498E-03,2.971241E-03,2.772266E-03,2.586616E-03,2.413398E-03,
2.251780E-03,2.100985E-03,1.960288E-03,1.829014E-03,1.706530E-03,1.592249E-03,
1.485621E-03,1.386133E-03,1.293308E-03,1.206699E-03,1.125890E-03,1.050492E-03,
9.801441E-04,9.145068E-04,8.532650E-04,7.961244E-04,7.428103E-04,6.930666E-04,
6.466540E-04,6.033495E-04,5.629450E-04,5.252462E-04,4.900721E-04,4.572534E-04,
4.266325E-04,3.980622E-04,3.714052E-04,3.465333E-04,3.233270E-04,3.016747E-04,
2.814725E-04,2.626231E-04,2.450360E-04,2.286267E-04,2.133163E-04,1.990311E-04,
1.857026E-04,1.732666E-04,1.616635E-04,1.508374E-04,1.407362E-04,1.313116E-04,
1.225180E-04,1.143133E-04,1.066581E-04,9.951555E-05,9.285129E-05,8.663332E-05,
8.083174E-05,7.541868E-05,7.036812E-05,6.565578E-05,6.125901E-05,5.715667E-05,
5.332906E-05,4.975778E-05,4.642565E-05,4.331666E-05,4.041587E-05,3.770934E-05,
3.518406E-05,3.282789E-05,3.062950E-05,2.857834E-05,2.666453E-05,2.487889E-05,
2.321282E-05,2.165833E-05,2.020794E-05,1.885467E-05,1.759203E-05,1.641394E-05,
1.531475E-05,1.428917E-05,1.333227E-05,1.243944E-05,1.160641E-05,1.082916E-05,
1.010397E-05,9.427336E-06,8.796015E-06,8.206972E-06,7.657376E-06,7.144584E-06,
6.666133E-06,6.219722E-06,5.803206E-06,5.414582E-06,5.051984E-06,4.713668E-06,
4.398008E-06,4.103486E-06,3.828688E-06,3.572292E-06,3.333066E-06,3.109861E-06,
2.901603E-06,2.707291E-06,2.525992E-06,2.356834E-06,2.199004E-06,2.051743E-06,
1.914344E-06,1.786146E-06,1.666533E-06,1.554930E-06,1.450801E-06,1.353646E-06,
1.262996E-06,1.178417E-06,1.099502E-06,1.025872E-06,9.571720E-07,8.930730E-07,
8.332666E-07,7.774652E-07,7.254007E-07,6.768228E-07,6.314980E-07,5.892085E-07,
5.497509E-07,5.129358E-07,4.785860E-07,4.465365E-07,4.166333E-07,3.887326E-07,
3.627004E-07,3.384114E-07,3.157490E-07,2.946042E-07,2.748755E-07,2.564679E-07,
2.392930E-07,2.232683E-07,2.083167E-07,1.943663E-07,1.813502E-07,1.692057E-07,
1.578745E-07,1.473021E-07,1.374377E-07,1.282339E-07,1.196465E-07,1.116341E-07],
[2.347878E+01,2.190648E+01,2.043947E+01,1.907070E+01,1.779359E+01,1.660201E+01,
1.549022E+01,3.793167E+01,3.539150E+01,3.302144E+01,3.081009E+01,2.874683E+01,
2.682174E+01,2.502557E+01,4.682847E+01,4.369250E+01,4.076655E+01,3.803653E+01,
3.548934E+01,3.311273E+01,3.089527E+01,5.230509E+01,4.880237E+01,4.553422E+01,
4.248493E+01,3.963984E+01,3.698528E+01,3.450849E+01,5.567634E+01,5.194786E+01,
4.846907E+01,4.522324E+01,4.219478E+01,3.936912E+01,3.673269E+01,5.775159E+01,
5.388414E+01,5.027568E+01,4.690887E+01,4.376752E+01,4.083654E+01,3.810184E+01,
5.902906E+01,5.507606E+01,5.138778E+01,4.794649E+01,4.473566E+01,4.173985E+01,
3.894465E+01,3.633665E+01,3.390329E+01,3.163289E+01,2.951453E+01,2.753803E+01,
2.569389E+01,2.397325E+01,2.236783E+01,2.086992E+01,1.947233E+01,1.816832E+01,
1.695165E+01,1.581644E+01,1.475726E+01,1.376901E+01,1.284694E+01,1.198662E+01,
1.118392E+01,1.043496E+01,9.736164E+00,9.084162E+00,8.475823E+00,7.908222E+00,
7.378632E+00,6.884507E+00,6.423472E+00,5.993312E+00,5.591958E+00,5.217481E+00,
4.868082E+00,4.542081E+00,4.237911E+00,3.954111E+00,3.689316E+00,3.442254E+00,
3.211736E+00,2.996656E+00,2.795979E+00,2.608740E+00,2.434041E+00,2.271040E+00,
2.118956E+00,1.977056E+00,1.844658E+00,1.721127E+00,1.605868E+00,1.498328E+00,
1.397989E+00,1.304370E+00,1.217020E+00,1.135520E+00,1.059478E+00,9.885278E-01,
9.223290E-01,8.605634E-01,8.029341E-01,7.491640E-01,6.989947E-01,6.521851E-01,
6.085102E-01,5.677601E-01,5.297389E-01,4.942639E-01,4.611645E-01,4.302817E-01,
4.014670E-01,3.745820E-01,3.494973E-01,3.260926E-01,3.042551E-01,2.838801E-01,
2.648695E-01,2.471319E-01,2.305823E-01,2.151409E-01,2.007335E-01,1.872910E-01,
1.747487E-01,1.630463E-01,1.521276E-01,1.419400E-01,1.324347E-01,1.235660E-01,
1.152911E-01,1.075704E-01,1.003668E-01,9.364550E-02,8.737434E-02,8.152314E-02,
7.606378E-02,7.097001E-02,6.621737E-02,6.178299E-02,5.764556E-02,5.378521E-02,
5.018338E-02,4.682275E-02,4.368717E-02,4.076157E-02,3.803189E-02,3.548501E-02,
3.310868E-02,3.089149E-02,2.882278E-02,2.689261E-02,2.509169E-02,2.341137E-02,
2.184358E-02,2.038078E-02,1.901594E-02,1.774250E-02,1.655434E-02,1.544575E-02,
1.441139E-02,1.344630E-02,1.254584E-02,1.170569E-02,1.092179E-02,1.019039E-02,
9.507972E-03,8.871252E-03,8.277171E-03,7.722873E-03,7.205696E-03,6.723152E-03,
6.272922E-03,5.852844E-03,5.460896E-03,5.095196E-03,4.753986E-03,4.435626E-03,
4.138585E-03,3.861437E-03,3.602848E-03,3.361576E-03,3.136461E-03,2.926422E-03,
2.730448E-03,2.547598E-03,2.376993E-03,2.217813E-03,2.069293E-03,1.930718E-03,
1.801424E-03,1.680788E-03,1.568231E-03,1.463211E-03,1.365224E-03,1.273799E-03,
1.188497E-03,1.108906E-03,1.034646E-03,9.653592E-04,9.007119E-04,8.403940E-04,
7.841153E-04,7.316054E-04,6.826120E-04,6.368995E-04,5.942483E-04,5.544532E-04,
5.173232E-04,4.826796E-04,4.503560E-04,4.201970E-04,3.920576E-04,3.658027E-04,
3.413060E-04,3.184498E-04,2.971241E-04,2.772266E-04,2.586616E-04,2.413398E-04,
2.251780E-04,2.100985E-04,1.960288E-04,1.829014E-04,1.706530E-04,1.592249E-04,
1.485621E-04,1.386133E-04,1.293308E-04,1.206699E-04,1.125890E-04,1.050492E-04,
9.801441E-05,9.145068E-05,8.532650E-05,7.961244E-05,7.428103E-05,6.930666E-05,
6.466540E-05,6.033495E-05,5.629450E-05,5.252462E-05,4.900721E-05,4.572534E-05,
4.266325E-05,3.980622E-05,3.714052E-05,3.465333E-05,3.233270E-05,3.016747E-05,
2.814725E-05,2.626231E-05,2.450360E-05,2.286267E-05,2.133163E-05,1.990311E-05,
1.857026E-05,1.732666E-05,1.616635E-05,1.508374E-05,1.407362E-05,1.313116E-05,
1.225180E-05,1.143133E-05,1.066581E-05,9.951555E-06,9.285129E-06,8.663332E-06,
8.083174E-06,7.541868E-06,7.036812E-06,6.565578E-06,6.125901E-06,5.715667E-06,
5.332906E-06,4.975778E-06,4.642565E-06,4.331666E-06,4.041587E-06,3.770934E-06,
3.518406E-06,3.282789E-06,3.062950E-06,2.857834E-06,2.666453E-06,2.487889E-06,
2.321282E-06,2.165833E-06,2.020794E-06,1.885467E-06,1.759203E-06,1.641394E-06,
1.531475E-06,1.428917E-06,1.333227E-06,1.243944E-06,1.160641E-06,1.082916E-06,
1.010397E-06,9.427336E-07,8.796015E-07,8.206972E-07,7.657376E-07,7.144584E-07,
6.666133E-07,6.219722E-07,5.803206E-07,5.414582E-07,5.051984E-07,4.713668E-07,
4.398008E-07,4.103486E-07,3.828688E-07,3.572292E-07,3.333066E-07,3.109861E-07,
2.901603E-07,2.707291E-07,2.525992E-07,2.356834E-07,2.199004E-07,2.051743E-07,
1.914344E-07,1.786146E-07,1.666533E-07,1.554930E-07,1.450801E-07,1.353646E-07,
1.262996E-07,1.178417E-07,1.099502E-07,1.025872E-07,9.571720E-08,8.930730E-08,
8.332666E-08,7.774652E-08,7.254007E-08,6.768228E-08,6.314980E-08,5.892085E-08,
5.497509E-08,5.129358E-08,4.785860E-08,4.465365E-08,4.166333E-08,3.887326E-08,
3.627004E-08,3.384114E-08,3.157490E-08,2.946042E-08,2.748755E-08,2.564679E-08,
2.392930E-08,2.232683E-08,2.083167E-08,1.943663E-08,1.813502E-08,1.692057E-08,
1.578745E-08,1.473021E-08,1.374377E-08,1.282339E-08,1.196465E-08,1.116341E-08],
[6.664600E-01,6.218291E-01,5.801871E-01,5.413337E-01,5.050822E-01,4.712584E-01,
4.396996E-01,1.076714E+00,1.004610E+00,9.373342E-01,8.745637E-01,8.159968E-01,
7.613519E-01,7.103665E-01,1.329255E+00,1.240239E+00,1.157184E+00,1.079691E+00,
1.007387E+00,9.399255E-01,8.769815E-01,1.484713E+00,1.385286E+00,1.292517E+00,
1.205961E+00,1.125202E+00,1.049850E+00,9.795450E-01,1.580408E+00,1.474573E+00,
1.375825E+00,1.283690E+00,1.197725E+00,1.117517E+00,1.042680E+00,1.639315E+00,
1.529535E+00,1.427107E+00,1.331538E+00,1.242369E+00,1.159171E+00,1.081545E+00,
1.675577E+00,1.563368E+00,1.458674E+00,1.360991E+00,1.269850E+00,1.184812E+00,
1.105468E+00,1.031439E+00,9.623662E-01,8.979194E-01,8.377884E-01,7.816842E-01,
7.293372E-01,6.804956E-01,6.349249E-01,5.924059E-01,5.527342E-01,5.157193E-01,
4.811831E-01,4.489597E-01,4.188942E-01,3.908421E-01,3.646686E-01,3.402478E-01,
3.174624E-01,2.962029E-01,2.763671E-01,2.578596E-01,2.405915E-01,2.244798E-01,
2.094471E-01,1.954211E-01,1.823343E-01,1.701239E-01,1.587312E-01,1.481015E-01,
1.381836E-01,1.289298E-01,1.202958E-01,1.122399E-01,1.047235E-01,9.771053E-02,
9.116714E-02,8.506195E-02,7.936561E-02,7.405073E-02,6.909178E-02,6.446491E-02,
6.014788E-02,5.611996E-02,5.236177E-02,4.885526E-02,4.558357E-02,4.253098E-02,
3.968280E-02,3.702537E-02,3.454589E-02,3.223245E-02,3.007394E-02,2.805998E-02,
2.618089E-02,2.442763E-02,2.279179E-02,2.126549E-02,1.984140E-02,1.851268E-02,
1.727294E-02,1.611623E-02,1.503697E-02,1.402999E-02,1.309044E-02,1.221382E-02,
1.139589E-02,1.063274E-02,9.920701E-03,9.256341E-03,8.636472E-03,8.058113E-03,
7.518486E-03,7.014995E-03,6.545222E-03,6.106908E-03,5.697947E-03,5.316372E-03,
4.960351E-03,4.628171E-03,4.318236E-03,4.029057E-03,3.759243E-03,3.507498E-03,
3.272611E-03,3.053454E-03,2.848973E-03,2.658186E-03,2.480175E-03,2.314085E-03,
2.159118E-03,2.014528E-03,1.879621E-03,1.753749E-03,1.636305E-03,1.526727E-03,
1.424487E-03,1.329093E-03,1.240088E-03,1.157043E-03,1.079559E-03,1.007264E-03,
9.398107E-04,8.768744E-04,8.181527E-04,7.633635E-04,7.122433E-04,6.645465E-04,
6.200438E-04,5.785213E-04,5.397795E-04,5.036321E-04,4.699053E-04,4.384372E-04,
4.090764E-04,3.816817E-04,3.561217E-04,3.322733E-04,3.100219E-04,2.892607E-04,
2.698897E-04,2.518160E-04,2.349527E-04,2.192186E-04,2.045382E-04,1.908409E-04,
1.780608E-04,1.661366E-04,1.550110E-04,1.446303E-04,1.349449E-04,1.259080E-04,
1.174763E-04,1.096093E-04,1.022691E-04,9.542044E-05,8.903041E-05,8.306831E-05,
7.750548E-05,7.231517E-05,6.747244E-05,6.295401E-05,5.873817E-05,5.480465E-05,
5.113455E-05,4.771022E-05,4.451521E-05,4.153416E-05,3.875274E-05,3.615758E-05,
3.373622E-05,3.147701E-05,2.936908E-05,2.740232E-05,2.556727E-05,2.385511E-05,
2.225760E-05,2.076708E-05,1.937637E-05,1.807879E-05,1.686811E-05,1.573850E-05,
1.468454E-05,1.370116E-05,1.278364E-05,1.192755E-05,1.112880E-05,1.038354E-05,
9.688185E-06,9.039396E-06,8.434055E-06,7.869251E-06,7.342271E-06,6.850581E-06,
6.391818E-06,5.963777E-06,5.564401E-06,5.191770E-06,4.844092E-06,4.519698E-06,
4.217027E-06,3.934626E-06,3.671136E-06,3.425291E-06,3.195909E-06,2.981889E-06,
2.782200E-06,2.595885E-06,2.422046E-06,2.259849E-06,2.108514E-06,1.967313E-06,
1.835568E-06,1.712645E-06,1.597955E-06,1.490944E-06,1.391100E-06,1.297942E-06,
1.211023E-06,1.129924E-06,1.054257E-06,9.836564E-07,9.177839E-07,8.563226E-07,
7.989773E-07,7.454722E-07,6.955501E-07,6.489712E-07,6.055115E-07,5.649622E-07,
5.271284E-07,4.918282E-07,4.588919E-07,4.281613E-07,3.994886E-07,3.727361E-07,
3.477751E-07,3.244856E-07,3.027558E-07,2.824811E-07,2.635642E-07,2.459141E-07,
2.294460E-07,2.140807E-07,1.997443E-07,1.863680E-07,1.738875E-07,1.622428E-07,
1.513779E-07,1.412406E-07,1.317821E-07,1.229571E-07,1.147230E-07,1.070403E-07,
9.987216E-08,9.318402E-08,8.694376E-08,8.112140E-08,7.568894E-08,7.062028E-08,
6.589105E-08,6.147853E-08,5.736149E-08,5.352016E-08,4.993608E-08,4.659201E-08,
4.347188E-08,4.056070E-08,3.784447E-08,3.531014E-08,3.294553E-08,3.073926E-08,
2.868075E-08,2.676008E-08,2.496804E-08,2.329600E-08,2.173594E-08,2.028035E-08,
1.892224E-08,1.765507E-08,1.647276E-08,1.536963E-08,1.434037E-08,1.338004E-08,
1.248402E-08,1.164800E-08,1.086797E-08,1.014018E-08,9.461118E-09,8.827535E-09,
8.236382E-09,7.684816E-09,7.170187E-09,6.690021E-09,6.242010E-09,5.824001E-09,
5.433985E-09,5.070088E-09,4.730559E-09,4.413768E-09,4.118191E-09,3.842408E-09,
3.585093E-09,3.345010E-09,3.121005E-09,2.912001E-09,2.716993E-09,2.535044E-09,
2.365279E-09,2.206884E-09,2.059095E-09,1.921204E-09,1.792547E-09,1.672505E-09,
1.560502E-09,1.456000E-09,1.358496E-09,1.267522E-09,1.182640E-09,1.103442E-09,
1.029548E-09,9.606020E-10,8.962733E-10,8.362526E-10,7.802512E-10,7.280002E-10,
6.792482E-10,6.337609E-10,5.913199E-10,5.517209E-10,5.147738E-10,4.803010E-10,
4.481367E-10,4.181263E-10,3.901256E-10,3.640001E-10,3.396241E-10,3.168805E-10]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.lipid_earthworm = 0.01
ted_empty.density_earthworm = 1.0
# input variables that change per simulation
ted_empty.log_kow = pd.Series([5.0, 4.0, 2.75], dtype='float')
# internally calculated variables
pore_h2o_conc = pd.Series([[2.347878E-01,2.190648E-01,2.043947E-01,1.907070E-01,1.779359E-01,1.660201E-01,
1.549022E-01,3.793167E-01,3.539150E-01,3.302144E-01,3.081009E-01,2.874683E-01,
2.682174E-01,2.502557E-01,4.682847E-01,4.369250E-01,4.076655E-01,3.803653E-01,
3.548934E-01,3.311273E-01,3.089527E-01,5.230509E-01,4.880237E-01,4.553422E-01,
4.248493E-01,3.963984E-01,3.698528E-01,3.450849E-01,5.567634E-01,5.194786E-01,
4.846907E-01,4.522324E-01,4.219478E-01,3.936912E-01,3.673269E-01,5.775159E-01,
5.388414E-01,5.027568E-01,4.690887E-01,4.376752E-01,4.083654E-01,3.810184E-01,
5.902906E-01,5.507606E-01,5.138778E-01,4.794649E-01,4.473566E-01,4.173985E-01,
3.894465E-01,3.633665E-01,3.390329E-01,3.163289E-01,2.951453E-01,2.753803E-01,
2.569389E-01,2.397325E-01,2.236783E-01,2.086992E-01,1.947233E-01,1.816832E-01,
1.695165E-01,1.581644E-01,1.475726E-01,1.376901E-01,1.284694E-01,1.198662E-01,
1.118392E-01,1.043496E-01,9.736164E-02,9.084162E-02,8.475823E-02,7.908222E-02,
7.378632E-02,6.884507E-02,6.423472E-02,5.993312E-02,5.591958E-02,5.217481E-02,
4.868082E-02,4.542081E-02,4.237911E-02,3.954111E-02,3.689316E-02,3.442254E-02,
3.211736E-02,2.996656E-02,2.795979E-02,2.608740E-02,2.434041E-02,2.271040E-02,
2.118956E-02,1.977056E-02,1.844658E-02,1.721127E-02,1.605868E-02,1.498328E-02,
1.397989E-02,1.304370E-02,1.217020E-02,1.135520E-02,1.059478E-02,9.885278E-03,
9.223290E-03,8.605634E-03,8.029341E-03,7.491640E-03,6.989947E-03,6.521851E-03,
6.085102E-03,5.677601E-03,5.297389E-03,4.942639E-03,4.611645E-03,4.302817E-03,
4.014670E-03,3.745820E-03,3.494973E-03,3.260926E-03,3.042551E-03,2.838801E-03,
2.648695E-03,2.471319E-03,2.305823E-03,2.151409E-03,2.007335E-03,1.872910E-03,
1.747487E-03,1.630463E-03,1.521276E-03,1.419400E-03,1.324347E-03,1.235660E-03,
1.152911E-03,1.075704E-03,1.003668E-03,9.364550E-04,8.737434E-04,8.152314E-04,
7.606378E-04,7.097001E-04,6.621737E-04,6.178299E-04,5.764556E-04,5.378521E-04,
5.018338E-04,4.682275E-04,4.368717E-04,4.076157E-04,3.803189E-04,3.548501E-04,
3.310868E-04,3.089149E-04,2.882278E-04,2.689261E-04,2.509169E-04,2.341137E-04,
2.184358E-04,2.038078E-04,1.901594E-04,1.774250E-04,1.655434E-04,1.544575E-04,
1.441139E-04,1.344630E-04,1.254584E-04,1.170569E-04,1.092179E-04,1.019039E-04,
9.507972E-05,8.871252E-05,8.277171E-05,7.722873E-05,7.205696E-05,6.723152E-05,
6.272922E-05,5.852844E-05,5.460896E-05,5.095196E-05,4.753986E-05,4.435626E-05,
4.138585E-05,3.861437E-05,3.602848E-05,3.361576E-05,3.136461E-05,2.926422E-05,
2.730448E-05,2.547598E-05,2.376993E-05,2.217813E-05,2.069293E-05,1.930718E-05,
1.801424E-05,1.680788E-05,1.568231E-05,1.463211E-05,1.365224E-05,1.273799E-05,
1.188497E-05,1.108906E-05,1.034646E-05,9.653592E-06,9.007119E-06,8.403940E-06,
7.841153E-06,7.316054E-06,6.826120E-06,6.368995E-06,5.942483E-06,5.544532E-06,
5.173232E-06,4.826796E-06,4.503560E-06,4.201970E-06,3.920576E-06,3.658027E-06,
3.413060E-06,3.184498E-06,2.971241E-06,2.772266E-06,2.586616E-06,2.413398E-06,
2.251780E-06,2.100985E-06,1.960288E-06,1.829014E-06,1.706530E-06,1.592249E-06,
1.485621E-06,1.386133E-06,1.293308E-06,1.206699E-06,1.125890E-06,1.050492E-06,
9.801441E-07,9.145068E-07,8.532650E-07,7.961244E-07,7.428103E-07,6.930666E-07,
6.466540E-07,6.033495E-07,5.629450E-07,5.252462E-07,4.900721E-07,4.572534E-07,
4.266325E-07,3.980622E-07,3.714052E-07,3.465333E-07,3.233270E-07,3.016747E-07,
2.814725E-07,2.626231E-07,2.450360E-07,2.286267E-07,2.133163E-07,1.990311E-07,
1.857026E-07,1.732666E-07,1.616635E-07,1.508374E-07,1.407362E-07,1.313116E-07,
1.225180E-07,1.143133E-07,1.066581E-07,9.951555E-08,9.285129E-08,8.663332E-08,
8.083174E-08,7.541868E-08,7.036812E-08,6.565578E-08,6.125901E-08,5.715667E-08,
5.332906E-08,4.975778E-08,4.642565E-08,4.331666E-08,4.041587E-08,3.770934E-08,
3.518406E-08,3.282789E-08,3.062950E-08,2.857834E-08,2.666453E-08,2.487889E-08,
2.321282E-08,2.165833E-08,2.020794E-08,1.885467E-08,1.759203E-08,1.641394E-08,
1.531475E-08,1.428917E-08,1.333227E-08,1.243944E-08,1.160641E-08,1.082916E-08,
1.010397E-08,9.427336E-09,8.796015E-09,8.206972E-09,7.657376E-09,7.144584E-09,
6.666133E-09,6.219722E-09,5.803206E-09,5.414582E-09,5.051984E-09,4.713668E-09,
4.398008E-09,4.103486E-09,3.828688E-09,3.572292E-09,3.333066E-09,3.109861E-09,
2.901603E-09,2.707291E-09,2.525992E-09,2.356834E-09,2.199004E-09,2.051743E-09,
1.914344E-09,1.786146E-09,1.666533E-09,1.554930E-09,1.450801E-09,1.353646E-09,
1.262996E-09,1.178417E-09,1.099502E-09,1.025872E-09,9.571720E-10,8.930730E-10,
8.332666E-10,7.774652E-10,7.254007E-10,6.768228E-10,6.314980E-10,5.892085E-10,
5.497509E-10,5.129358E-10,4.785860E-10,4.465365E-10,4.166333E-10,3.887326E-10,
3.627004E-10,3.384114E-10,3.157490E-10,2.946042E-10,2.748755E-10,2.564679E-10,
2.392930E-10,2.232683E-10,2.083167E-10,1.943663E-10,1.813502E-10,1.692057E-10,
1.578745E-10,1.473021E-10,1.374377E-10,1.282339E-10,1.196465E-10,1.116341E-10],
[2.347878E-01,2.190648E-01,2.043947E-01,1.907070E-01,1.779359E-01,1.660201E-01,
1.549022E-01,3.793167E-01,3.539150E-01,3.302144E-01,3.081009E-01,2.874683E-01,
2.682174E-01,2.502557E-01,4.682847E-01,4.369250E-01,4.076655E-01,3.803653E-01,
3.548934E-01,3.311273E-01,3.089527E-01,5.230509E-01,4.880237E-01,4.553422E-01,
4.248493E-01,3.963984E-01,3.698528E-01,3.450849E-01,5.567634E-01,5.194786E-01,
4.846907E-01,4.522324E-01,4.219478E-01,3.936912E-01,3.673269E-01,5.775159E-01,
5.388414E-01,5.027568E-01,4.690887E-01,4.376752E-01,4.083654E-01,3.810184E-01,
5.902906E-01,5.507606E-01,5.138778E-01,4.794649E-01,4.473566E-01,4.173985E-01,
3.894465E-01,3.633665E-01,3.390329E-01,3.163289E-01,2.951453E-01,2.753803E-01,
2.569389E-01,2.397325E-01,2.236783E-01,2.086992E-01,1.947233E-01,1.816832E-01,
1.695165E-01,1.581644E-01,1.475726E-01,1.376901E-01,1.284694E-01,1.198662E-01,
1.118392E-01,1.043496E-01,9.736164E-02,9.084162E-02,8.475823E-02,7.908222E-02,
7.378632E-02,6.884507E-02,6.423472E-02,5.993312E-02,5.591958E-02,5.217481E-02,
4.868082E-02,4.542081E-02,4.237911E-02,3.954111E-02,3.689316E-02,3.442254E-02,
3.211736E-02,2.996656E-02,2.795979E-02,2.608740E-02,2.434041E-02,2.271040E-02,
2.118956E-02,1.977056E-02,1.844658E-02,1.721127E-02,1.605868E-02,1.498328E-02,
1.397989E-02,1.304370E-02,1.217020E-02,1.135520E-02,1.059478E-02,9.885278E-03,
9.223290E-03,8.605634E-03,8.029341E-03,7.491640E-03,6.989947E-03,6.521851E-03,
6.085102E-03,5.677601E-03,5.297389E-03,4.942639E-03,4.611645E-03,4.302817E-03,
4.014670E-03,3.745820E-03,3.494973E-03,3.260926E-03,3.042551E-03,2.838801E-03,
2.648695E-03,2.471319E-03,2.305823E-03,2.151409E-03,2.007335E-03,1.872910E-03,
1.747487E-03,1.630463E-03,1.521276E-03,1.419400E-03,1.324347E-03,1.235660E-03,
1.152911E-03,1.075704E-03,1.003668E-03,9.364550E-04,8.737434E-04,8.152314E-04,
7.606378E-04,7.097001E-04,6.621737E-04,6.178299E-04,5.764556E-04,5.378521E-04,
5.018338E-04,4.682275E-04,4.368717E-04,4.076157E-04,3.803189E-04,3.548501E-04,
3.310868E-04,3.089149E-04,2.882278E-04,2.689261E-04,2.509169E-04,2.341137E-04,
2.184358E-04,2.038078E-04,1.901594E-04,1.774250E-04,1.655434E-04,1.544575E-04,
1.441139E-04,1.344630E-04,1.254584E-04,1.170569E-04,1.092179E-04,1.019039E-04,
9.507972E-05,8.871252E-05,8.277171E-05,7.722873E-05,7.205696E-05,6.723152E-05,
6.272922E-05,5.852844E-05,5.460896E-05,5.095196E-05,4.753986E-05,4.435626E-05,
4.138585E-05,3.861437E-05,3.602848E-05,3.361576E-05,3.136461E-05,2.926422E-05,
2.730448E-05,2.547598E-05,2.376993E-05,2.217813E-05,2.069293E-05,1.930718E-05,
1.801424E-05,1.680788E-05,1.568231E-05,1.463211E-05,1.365224E-05,1.273799E-05,
1.188497E-05,1.108906E-05,1.034646E-05,9.653592E-06,9.007119E-06,8.403940E-06,
7.841153E-06,7.316054E-06,6.826120E-06,6.368995E-06,5.942483E-06,5.544532E-06,
5.173232E-06,4.826796E-06,4.503560E-06,4.201970E-06,3.920576E-06,3.658027E-06,
3.413060E-06,3.184498E-06,2.971241E-06,2.772266E-06,2.586616E-06,2.413398E-06,
2.251780E-06,2.100985E-06,1.960288E-06,1.829014E-06,1.706530E-06,1.592249E-06,
1.485621E-06,1.386133E-06,1.293308E-06,1.206699E-06,1.125890E-06,1.050492E-06,
9.801441E-07,9.145068E-07,8.532650E-07,7.961244E-07,7.428103E-07,6.930666E-07,
6.466540E-07,6.033495E-07,5.629450E-07,5.252462E-07,4.900721E-07,4.572534E-07,
4.266325E-07,3.980622E-07,3.714052E-07,3.465333E-07,3.233270E-07,3.016747E-07,
2.814725E-07,2.626231E-07,2.450360E-07,2.286267E-07,2.133163E-07,1.990311E-07,
1.857026E-07,1.732666E-07,1.616635E-07,1.508374E-07,1.407362E-07,1.313116E-07,
1.225180E-07,1.143133E-07,1.066581E-07,9.951555E-08,9.285129E-08,8.663332E-08,
8.083174E-08,7.541868E-08,7.036812E-08,6.565578E-08,6.125901E-08,5.715667E-08,
5.332906E-08,4.975778E-08,4.642565E-08,4.331666E-08,4.041587E-08,3.770934E-08,
3.518406E-08,3.282789E-08,3.062950E-08,2.857834E-08,2.666453E-08,2.487889E-08,
2.321282E-08,2.165833E-08,2.020794E-08,1.885467E-08,1.759203E-08,1.641394E-08,
1.531475E-08,1.428917E-08,1.333227E-08,1.243944E-08,1.160641E-08,1.082916E-08,
1.010397E-08,9.427336E-09,8.796015E-09,8.206972E-09,7.657376E-09,7.144584E-09,
6.666133E-09,6.219722E-09,5.803206E-09,5.414582E-09,5.051984E-09,4.713668E-09,
4.398008E-09,4.103486E-09,3.828688E-09,3.572292E-09,3.333066E-09,3.109861E-09,
2.901603E-09,2.707291E-09,2.525992E-09,2.356834E-09,2.199004E-09,2.051743E-09,
1.914344E-09,1.786146E-09,1.666533E-09,1.554930E-09,1.450801E-09,1.353646E-09,
1.262996E-09,1.178417E-09,1.099502E-09,1.025872E-09,9.571720E-10,8.930730E-10,
8.332666E-10,7.774652E-10,7.254007E-10,6.768228E-10,6.314980E-10,5.892085E-10,
5.497509E-10,5.129358E-10,4.785860E-10,4.465365E-10,4.166333E-10,3.887326E-10,
3.627004E-10,3.384114E-10,3.157490E-10,2.946042E-10,2.748755E-10,2.564679E-10,
2.392930E-10,2.232683E-10,2.083167E-10,1.943663E-10,1.813502E-10,1.692057E-10,
1.578745E-10,1.473021E-10,1.374377E-10,1.282339E-10,1.196465E-10,1.116341E-10],
[1.185152E-01,1.105786E-01,1.031735E-01,9.626426E-02,8.981773E-02,8.380291E-02,
7.819088E-02,1.914699E-01,1.786477E-01,1.666842E-01,1.555219E-01,1.451070E-01,
1.353896E-01,1.263230E-01,2.363787E-01,2.205492E-01,2.057796E-01,1.919992E-01,
1.791416E-01,1.671450E-01,1.559518E-01,2.640234E-01,2.463425E-01,2.298457E-01,
2.144536E-01,2.000923E-01,1.866927E-01,1.741905E-01,2.810407E-01,2.622202E-01,
2.446601E-01,2.282760E-01,2.129890E-01,1.987258E-01,1.854177E-01,2.915160E-01,
2.719941E-01,2.537794E-01,2.367846E-01,2.209278E-01,2.061330E-01,1.923289E-01,
2.979644E-01,2.780106E-01,2.593931E-01,2.420223E-01,2.258148E-01,2.106926E-01,
1.965832E-01,1.834186E-01,1.711356E-01,1.596752E-01,1.489822E-01,1.390053E-01,
1.296965E-01,1.210111E-01,1.129074E-01,1.053463E-01,9.829159E-02,9.170929E-02,
8.556780E-02,7.983758E-02,7.449109E-02,6.950265E-02,6.484826E-02,6.050557E-02,
5.645369E-02,5.267316E-02,4.914579E-02,4.585465E-02,4.278390E-02,3.991879E-02,
3.724555E-02,3.475132E-02,3.242413E-02,3.025278E-02,2.822685E-02,2.633658E-02,
2.457290E-02,2.292732E-02,2.139195E-02,1.995939E-02,1.862277E-02,1.737566E-02,
1.621207E-02,1.512639E-02,1.411342E-02,1.316829E-02,1.228645E-02,1.146366E-02,
1.069597E-02,9.979697E-03,9.311387E-03,8.687831E-03,8.106033E-03,7.563196E-03,
7.056711E-03,6.584145E-03,6.143224E-03,5.731831E-03,5.347987E-03,4.989849E-03,
4.655693E-03,4.343915E-03,4.053016E-03,3.781598E-03,3.528356E-03,3.292072E-03,
3.071612E-03,2.865915E-03,2.673994E-03,2.494924E-03,2.327847E-03,2.171958E-03,
2.026508E-03,1.890799E-03,1.764178E-03,1.646036E-03,1.535806E-03,1.432958E-03,
1.336997E-03,1.247462E-03,1.163923E-03,1.085979E-03,1.013254E-03,9.453995E-04,
8.820889E-04,8.230181E-04,7.679030E-04,7.164788E-04,6.684984E-04,6.237311E-04,
5.819617E-04,5.429894E-04,5.066271E-04,4.726998E-04,4.410445E-04,4.115090E-04,
3.839515E-04,3.582394E-04,3.342492E-04,3.118655E-04,2.909808E-04,2.714947E-04,
2.533135E-04,2.363499E-04,2.205222E-04,2.057545E-04,1.919758E-04,1.791197E-04,
1.671246E-04,1.559328E-04,1.454904E-04,1.357474E-04,1.266568E-04,1.181749E-04,
1.102611E-04,1.028773E-04,9.598788E-05,8.955986E-05,8.356230E-05,7.796638E-05,
7.274521E-05,6.787368E-05,6.332838E-05,5.908747E-05,5.513056E-05,5.143863E-05,
4.799394E-05,4.477993E-05,4.178115E-05,3.898319E-05,3.637260E-05,3.393684E-05,
3.166419E-05,2.954373E-05,2.756528E-05,2.571931E-05,2.399697E-05,2.238996E-05,
2.089058E-05,1.949160E-05,1.818630E-05,1.696842E-05,1.583210E-05,1.477187E-05,
1.378264E-05,1.285966E-05,1.199848E-05,1.119498E-05,1.044529E-05,9.745798E-06,
9.093151E-06,8.484210E-06,7.916048E-06,7.385934E-06,6.891320E-06,6.429829E-06,
5.999242E-06,5.597491E-06,5.222644E-06,4.872899E-06,4.546575E-06,4.242105E-06,
3.958024E-06,3.692967E-06,3.445660E-06,3.214914E-06,2.999621E-06,2.798745E-06,
2.611322E-06,2.436449E-06,2.273288E-06,2.121052E-06,1.979012E-06,1.846483E-06,
1.722830E-06,1.607457E-06,1.499811E-06,1.399373E-06,1.305661E-06,1.218225E-06,
1.136644E-06,1.060526E-06,9.895060E-07,9.232417E-07,8.614150E-07,8.037286E-07,
7.499053E-07,6.996864E-07,6.528305E-07,6.091124E-07,5.683219E-07,5.302631E-07,
4.947530E-07,4.616209E-07,4.307075E-07,4.018643E-07,3.749526E-07,3.498432E-07,
3.264152E-07,3.045562E-07,2.841610E-07,2.651316E-07,2.473765E-07,2.308104E-07,
2.153537E-07,2.009321E-07,1.874763E-07,1.749216E-07,1.632076E-07,1.522781E-07,
1.420805E-07,1.325658E-07,1.236882E-07,1.154052E-07,1.076769E-07,1.004661E-07,
9.373816E-08,8.746080E-08,8.160381E-08,7.613905E-08,7.104024E-08,6.628289E-08,
6.184412E-08,5.770261E-08,5.383844E-08,5.023304E-08,4.686908E-08,4.373040E-08,
4.080190E-08,3.806952E-08,3.552012E-08,3.314144E-08,3.092206E-08,2.885130E-08,
2.691922E-08,2.511652E-08,2.343454E-08,2.186520E-08,2.040095E-08,1.903476E-08,
1.776006E-08,1.657072E-08,1.546103E-08,1.442565E-08,1.345961E-08,1.255826E-08,
1.171727E-08,1.093260E-08,1.020048E-08,9.517381E-09,8.880030E-09,8.285361E-09,
7.730515E-09,7.212826E-09,6.729804E-09,6.279130E-09,5.858635E-09,5.466300E-09,
5.100238E-09,4.758690E-09,4.440015E-09,4.142681E-09,3.865258E-09,3.606413E-09,
3.364902E-09,3.139565E-09,2.929318E-09,2.733150E-09,2.550119E-09,2.379345E-09,
2.220008E-09,2.071340E-09,1.932629E-09,1.803206E-09,1.682451E-09,1.569782E-09,
1.464659E-09,1.366575E-09,1.275060E-09,1.189673E-09,1.110004E-09,1.035670E-09,
9.663144E-10,9.016032E-10,8.412256E-10,7.848912E-10,7.323294E-10,6.832875E-10,
6.375298E-10,5.948363E-10,5.550019E-10,5.178351E-10,4.831572E-10,4.508016E-10,
4.206128E-10,3.924456E-10,3.661647E-10,3.416437E-10,3.187649E-10,2.974181E-10,
2.775009E-10,2.589175E-10,2.415786E-10,2.254008E-10,2.103064E-10,1.962228E-10,
1.830823E-10,1.708219E-10,1.593824E-10,1.487091E-10,1.387505E-10,1.294588E-10,
1.207893E-10,1.127004E-10,1.051532E-10,9.811140E-11,9.154117E-11,8.541093E-11,
7.969122E-11,7.435454E-11,6.937524E-11,6.472938E-11,6.039465E-11,5.635020E-11]], dtype='float')
for i in range(3):
result[i] = ted_empty.daily_soil_inv_timeseries(i, pore_h2o_conc[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_animal_dose_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in animals (mammals, birds, amphibians, reptiles)
:param a1; coefficient of allometric expression
:param b1; exponent of allometric expression
:param body_wgt; body weight of species (g)
:param frac_h2o; fraction of water in food item
:param intake_food_conc; pesticide concentration in food item (daily mg a.i./kg)
:param frac_retained; fraction of ingested food retained by animal (mammals, birds, reptiles/amphibians)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
# association, rather it is one year from the day of 1st pesticide application
# this represents Eqs 5&6 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create an empty TED object (backed by empty pandas dataframes) for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([[]], dtype='float')
expected_results = [[2.860270E+02,3.090209E+02,3.058215E+02,3.001105E+02,2.942541E+02,2.884869E+02,2.828301E+02,
5.633110E+02,5.808675E+02,5.723374E+02,5.614002E+02,5.504201E+02,5.396295E+02,5.290480E+02,
8.047008E+02,8.175238E+02,8.043529E+02,7.888661E+02,7.734255E+02,7.582619E+02,7.433932E+02,
1.014843E+03,1.023545E+03,1.006334E+03,9.868866E+02,9.675630E+02,9.485925E+02,9.299915E+02,
1.197782E+03,1.202897E+03,1.182169E+03,1.159274E+03,1.136569E+03,1.114285E+03,1.092435E+03,
1.357040E+03,1.359032E+03,1.335242E+03,1.309345E+03,1.283698E+03,1.258528E+03,1.233850E+03,
1.495682E+03,1.494955E+03,1.468500E+03,1.439990E+03,1.411781E+03,1.384100E+03,1.356959E+03,
1.330350E+03,1.304262E+03,1.278687E+03,1.253612E+03,1.229030E+03,1.204929E+03,1.181301E+03,
1.158137E+03,1.135426E+03,1.113161E+03,1.091333E+03,1.069932E+03,1.048952E+03,1.028382E+03,
1.008217E+03,9.884460E+02,9.690632E+02,9.500604E+02,9.314303E+02,9.131655E+02,8.952589E+02,
8.777034E+02,8.604922E+02,8.436185E+02,8.270756E+02,8.108572E+02,7.949568E+02,7.793682E+02,
7.640852E+02,7.491020E+02,7.344125E+02,7.200112E+02,7.058922E+02,6.920501E+02,6.784794E+02,
6.651748E+02,6.521312E+02,6.393433E+02,6.268061E+02,6.145148E+02,6.024646E+02,5.906506E+02,
5.790683E+02,5.677131E+02,5.565806E+02,5.456664E+02,5.349662E+02,5.244759E+02,5.141912E+02,
5.041083E+02,4.942230E+02,4.845316E+02,4.750302E+02,4.657152E+02,4.565828E+02,4.476295E+02,
4.388517E+02,4.302461E+02,4.218092E+02,4.135378E+02,4.054286E+02,3.974784E+02,3.896841E+02,
3.820426E+02,3.745510E+02,3.672063E+02,3.600056E+02,3.529461E+02,3.460250E+02,3.392397E+02,
3.325874E+02,3.260656E+02,3.196716E+02,3.134031E+02,3.072574E+02,3.012323E+02,2.953253E+02,
2.895342E+02,2.838566E+02,2.782903E+02,2.728332E+02,2.674831E+02,2.622379E+02,2.570956E+02,
2.520541E+02,2.471115E+02,2.422658E+02,2.375151E+02,2.328576E+02,2.282914E+02,2.238147E+02,
2.194259E+02,2.151231E+02,2.109046E+02,2.067689E+02,2.027143E+02,1.987392E+02,1.948420E+02,
1.910213E+02,1.872755E+02,1.836031E+02,1.800028E+02,1.764730E+02,1.730125E+02,1.696198E+02,
1.662937E+02,1.630328E+02,1.598358E+02,1.567015E+02,1.536287E+02,1.506161E+02,1.476627E+02,
1.447671E+02,1.419283E+02,1.391452E+02,1.364166E+02,1.337416E+02,1.311190E+02,1.285478E+02,
1.260271E+02,1.235557E+02,1.211329E+02,1.187576E+02,1.164288E+02,1.141457E+02,1.119074E+02,
1.097129E+02,1.075615E+02,1.054523E+02,1.033845E+02,1.013571E+02,9.936960E+01,9.742102E+01,
9.551065E+01,9.363775E+01,9.180157E+01,9.000140E+01,8.823652E+01,8.650626E+01,8.480992E+01,
8.314685E+01,8.151639E+01,7.991791E+01,7.835077E+01,7.681436E+01,7.530807E+01,7.383133E+01,
7.238354E+01,7.096414E+01,6.957258E+01,6.820830E+01,6.687078E+01,6.555948E+01,6.427390E+01,
6.301353E+01,6.177787E+01,6.056645E+01,5.937878E+01,5.821440E+01,5.707285E+01,5.595368E+01,
5.485647E+01,5.378076E+01,5.272616E+01,5.169223E+01,5.067857E+01,4.968480E+01,4.871051E+01,
4.775533E+01,4.681887E+01,4.590078E+01,4.500070E+01,4.411826E+01,4.325313E+01,4.240496E+01,
4.157343E+01,4.075820E+01,3.995895E+01,3.917538E+01,3.840718E+01,3.765404E+01,3.691566E+01,
3.619177E+01,3.548207E+01,3.478629E+01,3.410415E+01,3.343539E+01,3.277974E+01,3.213695E+01,
3.150677E+01,3.088894E+01,3.028322E+01,2.968939E+01,2.910720E+01,2.853642E+01,2.797684E+01,
2.742823E+01,2.689038E+01,2.636308E+01,2.584611E+01,2.533929E+01,2.484240E+01,2.435526E+01,
2.387766E+01,2.340944E+01,2.295039E+01,2.250035E+01,2.205913E+01,2.162656E+01,2.120248E+01,
2.078671E+01,2.037910E+01,1.997948E+01,1.958769E+01,1.920359E+01,1.882702E+01,1.845783E+01,
1.809588E+01,1.774104E+01,1.739314E+01,1.705208E+01,1.671770E+01,1.638987E+01,1.606848E+01,
1.575338E+01,1.544447E+01,1.514161E+01,1.484469E+01,1.455360E+01,1.426821E+01,1.398842E+01,
1.371412E+01,1.344519E+01,1.318154E+01,1.292306E+01,1.266964E+01,1.242120E+01,1.217763E+01,
1.193883E+01,1.170472E+01,1.147520E+01,1.125017E+01,1.102957E+01,1.081328E+01,1.060124E+01,
1.039336E+01,1.018955E+01,9.989738E+00,9.793846E+00,9.601794E+00,9.413509E+00,9.228916E+00,
9.047942E+00,8.870518E+00,8.696572E+00,8.526038E+00,8.358848E+00,8.194936E+00,8.034238E+00,
7.876691E+00,7.722234E+00,7.570806E+00,7.422347E+00,7.276799E+00,7.134106E+00,6.994210E+00,
6.857058E+00,6.722595E+00,6.590769E+00,6.461528E+00,6.334822E+00,6.210600E+00,6.088814E+00,
5.969416E+00,5.852359E+00,5.737598E+00,5.625087E+00,5.514783E+00,5.406641E+00,5.300620E+00,
5.196678E+00,5.094775E+00,4.994869E+00,4.896923E+00,4.800897E+00,4.706755E+00,4.614458E+00,
4.523971E+00,4.435259E+00,4.348286E+00,4.263019E+00,4.179424E+00,4.097468E+00,4.017119E+00,
3.938346E+00,3.861117E+00,3.785403E+00,3.711174E+00,3.638400E+00,3.567053E+00,3.497105E+00,
3.428529E+00,3.361298E+00,3.295385E+00,3.230764E+00,3.167411E+00,3.105300E+00,3.044407E+00,
2.984708E+00,2.926180E+00,2.868799E+00,2.812544E+00,2.757391E+00,2.703321E+00,2.650310E+00,
2.598339E+00,2.547387E+00],
[4.583348E+01,4.951806E+01,4.900538E+01,4.809025E+01,4.715181E+01,4.622765E+01,4.532120E+01,
9.026597E+01,9.307926E+01,9.171236E+01,8.995977E+01,8.820030E+01,8.647120E+01,8.477560E+01,
1.289467E+02,1.310015E+02,1.288910E+02,1.264093E+02,1.239351E+02,1.215053E+02,1.191227E+02,
1.626202E+02,1.640147E+02,1.612568E+02,1.581405E+02,1.550440E+02,1.520042E+02,1.490235E+02,
1.919347E+02,1.927544E+02,1.894329E+02,1.857641E+02,1.821259E+02,1.785550E+02,1.750537E+02,
2.174545E+02,2.177737E+02,2.139616E+02,2.098118E+02,2.057021E+02,2.016689E+02,1.977143E+02,
2.396707E+02,2.395543E+02,2.353151E+02,2.307466E+02,2.262263E+02,2.217906E+02,2.174415E+02,
2.131776E+02,2.089973E+02,2.048990E+02,2.008811E+02,1.969419E+02,1.930800E+02,1.892938E+02,
1.855819E+02,1.819427E+02,1.783750E+02,1.748771E+02,1.714479E+02,1.680859E+02,1.647898E+02,
1.615584E+02,1.583904E+02,1.552844E+02,1.522394E+02,1.492541E+02,1.463273E+02,1.434579E+02,
1.406448E+02,1.378868E+02,1.351829E+02,1.325321E+02,1.299332E+02,1.273853E+02,1.248873E+02,
1.224384E+02,1.200374E+02,1.176836E+02,1.153759E+02,1.131134E+02,1.108953E+02,1.087208E+02,
1.065888E+02,1.044987E+02,1.024495E+02,1.004405E+02,9.847096E+01,9.654000E+01,9.464691E+01,
9.279094E+01,9.097137E+01,8.918748E+01,8.743857E+01,8.572395E+01,8.404295E+01,8.239492E+01,
8.077921E+01,7.919518E+01,7.764221E+01,7.611969E+01,7.462703E+01,7.316364E+01,7.172895E+01,
7.032239E+01,6.894341E+01,6.759147E+01,6.626604E+01,6.496660E+01,6.369265E+01,6.244367E+01,
6.121919E+01,6.001872E+01,5.884179E+01,5.768794E+01,5.655671E+01,5.544767E+01,5.436038E+01,
5.329440E+01,5.224933E+01,5.122475E+01,5.022027E+01,4.923548E+01,4.827000E+01,4.732346E+01,
4.639547E+01,4.548569E+01,4.459374E+01,4.371928E+01,4.286197E+01,4.202148E+01,4.119746E+01,
4.038960E+01,3.959759E+01,3.882110E+01,3.805985E+01,3.731352E+01,3.658182E+01,3.586447E+01,
3.516119E+01,3.447170E+01,3.379573E+01,3.313302E+01,3.248330E+01,3.184632E+01,3.122184E+01,
3.060960E+01,3.000936E+01,2.942090E+01,2.884397E+01,2.827836E+01,2.772384E+01,2.718019E+01,
2.664720E+01,2.612467E+01,2.561238E+01,2.511013E+01,2.461774E+01,2.413500E+01,2.366173E+01,
2.319774E+01,2.274284E+01,2.229687E+01,2.185964E+01,2.143099E+01,2.101074E+01,2.059873E+01,
2.019480E+01,1.979879E+01,1.941055E+01,1.902992E+01,1.865676E+01,1.829091E+01,1.793224E+01,
1.758060E+01,1.723585E+01,1.689787E+01,1.656651E+01,1.624165E+01,1.592316E+01,1.561092E+01,
1.530480E+01,1.500468E+01,1.471045E+01,1.442198E+01,1.413918E+01,1.386192E+01,1.359009E+01,
1.332360E+01,1.306233E+01,1.280619E+01,1.255507E+01,1.230887E+01,1.206750E+01,1.183086E+01,
1.159887E+01,1.137142E+01,1.114843E+01,1.092982E+01,1.071549E+01,1.050537E+01,1.029937E+01,
1.009740E+01,9.899397E+00,9.705276E+00,9.514962E+00,9.328379E+00,9.145455E+00,8.966118E+00,
8.790298E+00,8.617926E+00,8.448934E+00,8.283255E+00,8.120826E+00,7.961581E+00,7.805459E+00,
7.652399E+00,7.502340E+00,7.355224E+00,7.210992E+00,7.069589E+00,6.930959E+00,6.795047E+00,
6.661800E+00,6.531166E+00,6.403094E+00,6.277533E+00,6.154435E+00,6.033750E+00,5.915432E+00,
5.799434E+00,5.685711E+00,5.574217E+00,5.464910E+00,5.357747E+00,5.252685E+00,5.149683E+00,
5.048701E+00,4.949699E+00,4.852638E+00,4.757481E+00,4.664189E+00,4.572728E+00,4.483059E+00,
4.395149E+00,4.308963E+00,4.224467E+00,4.141628E+00,4.060413E+00,3.980791E+00,3.902730E+00,
3.826200E+00,3.751170E+00,3.677612E+00,3.605496E+00,3.534795E+00,3.465479E+00,3.397524E+00,
3.330900E+00,3.265583E+00,3.201547E+00,3.138767E+00,3.077217E+00,3.016875E+00,2.957716E+00,
2.899717E+00,2.842855E+00,2.787109E+00,2.732455E+00,2.678873E+00,2.626342E+00,2.574841E+00,
2.524350E+00,2.474849E+00,2.426319E+00,2.378740E+00,2.332095E+00,2.286364E+00,2.241530E+00,
2.197575E+00,2.154481E+00,2.112233E+00,2.070814E+00,2.030206E+00,1.990395E+00,1.951365E+00,
1.913100E+00,1.875585E+00,1.838806E+00,1.802748E+00,1.767397E+00,1.732740E+00,1.698762E+00,
1.665450E+00,1.632792E+00,1.600774E+00,1.569383E+00,1.538609E+00,1.508438E+00,1.478858E+00,
1.449859E+00,1.421428E+00,1.393554E+00,1.366228E+00,1.339437E+00,1.313171E+00,1.287421E+00,
1.262175E+00,1.237425E+00,1.213160E+00,1.189370E+00,1.166047E+00,1.143182E+00,1.120765E+00,
1.098787E+00,1.077241E+00,1.056117E+00,1.035407E+00,1.015103E+00,9.951976E-01,9.756824E-01,
9.565499E-01,9.377925E-01,9.194030E-01,9.013741E-01,8.836987E-01,8.663699E-01,8.493809E-01,
8.327250E-01,8.163958E-01,8.003868E-01,7.846917E-01,7.693044E-01,7.542188E-01,7.394290E-01,
7.249293E-01,7.107138E-01,6.967772E-01,6.831138E-01,6.697183E-01,6.565856E-01,6.437103E-01,
6.310876E-01,6.187123E-01,6.065798E-01,5.946851E-01,5.830237E-01,5.715909E-01,5.603824E-01,
5.493936E-01,5.386204E-01,5.280583E-01,5.177034E-01,5.075516E-01,4.975988E-01,4.878412E-01,
4.782749E-01,4.688963E-01,4.597015E-01,4.506870E-01,4.418493E-01,4.331849E-01,4.246904E-01,
4.163625E-01,4.081979E-01],
[1.338207E+02,1.378876E+02,1.355183E+02,1.328776E+02,1.302728E+02,1.277182E+02,1.252138E+02,
2.565791E+02,2.582388E+02,2.535095E+02,2.485550E+02,2.436818E+02,2.389034E+02,2.342187E+02,
3.634465E+02,3.630106E+02,3.562267E+02,3.492581E+02,3.424102E+02,3.356958E+02,3.291130E+02,
4.564800E+02,4.542198E+02,4.456473E+02,4.369252E+02,4.283582E+02,4.199584E+02,4.117233E+02,
5.374704E+02,5.336219E+02,5.234925E+02,5.132438E+02,5.031803E+02,4.933133E+02,4.836397E+02,
6.079765E+02,6.027455E+02,5.912606E+02,5.796831E+02,5.683167E+02,5.571724E+02,5.462466E+02,
6.693557E+02,6.629211E+02,6.502562E+02,6.375218E+02,6.250212E+02,6.127650E+02,6.007490E+02,
5.889687E+02,5.774194E+02,5.660965E+02,5.549957E+02,5.441126E+02,5.334429E+02,5.229824E+02,
5.127270E+02,5.026728E+02,4.928157E+02,4.831519E+02,4.736775E+02,4.643890E+02,4.552826E+02,
4.463548E+02,4.376021E+02,4.290210E+02,4.206081E+02,4.123602E+02,4.042741E+02,3.963465E+02,
3.885744E+02,3.809547E+02,3.734844E+02,3.661606E+02,3.589804E+02,3.519411E+02,3.450397E+02,
3.382737E+02,3.316404E+02,3.251371E+02,3.187613E+02,3.125106E+02,3.063825E+02,3.003745E+02,
2.944844E+02,2.887097E+02,2.830483E+02,2.774979E+02,2.720563E+02,2.667214E+02,2.614912E+02,
2.563635E+02,2.513364E+02,2.464078E+02,2.415759E+02,2.368388E+02,2.321945E+02,2.276413E+02,
2.231774E+02,2.188010E+02,2.145105E+02,2.103041E+02,2.061801E+02,2.021371E+02,1.981733E+02,
1.942872E+02,1.904774E+02,1.867422E+02,1.830803E+02,1.794902E+02,1.759705E+02,1.725199E+02,
1.691368E+02,1.658202E+02,1.625685E+02,1.593807E+02,1.562553E+02,1.531912E+02,1.501873E+02,
1.472422E+02,1.443548E+02,1.415241E+02,1.387489E+02,1.360282E+02,1.333607E+02,1.307456E+02,
1.281818E+02,1.256682E+02,1.232039E+02,1.207880E+02,1.184194E+02,1.160973E+02,1.138207E+02,
1.115887E+02,1.094005E+02,1.072552E+02,1.051520E+02,1.030901E+02,1.010685E+02,9.908664E+01,
9.714361E+01,9.523868E+01,9.337111E+01,9.154016E+01,8.974511E+01,8.798527E+01,8.625993E+01,
8.456842E+01,8.291009E+01,8.128427E+01,7.969034E+01,7.812766E+01,7.659562E+01,7.509363E+01,
7.362109E+01,7.217742E+01,7.076207E+01,6.937447E+01,6.801408E+01,6.668036E+01,6.537280E+01,
6.409088E+01,6.283410E+01,6.160196E+01,6.039398E+01,5.920969E+01,5.804863E+01,5.691033E+01,
5.579435E+01,5.470026E+01,5.362762E+01,5.257601E+01,5.154503E+01,5.053426E+01,4.954332E+01,
4.857180E+01,4.761934E+01,4.668555E+01,4.577008E+01,4.487256E+01,4.399263E+01,4.312996E+01,
4.228421E+01,4.145504E+01,4.064214E+01,3.984517E+01,3.906383E+01,3.829781E+01,3.754681E+01,
3.681054E+01,3.608871E+01,3.538103E+01,3.468723E+01,3.400704E+01,3.334018E+01,3.268640E+01,
3.204544E+01,3.141705E+01,3.080098E+01,3.019699E+01,2.960485E+01,2.902431E+01,2.845516E+01,
2.789718E+01,2.735013E+01,2.681381E+01,2.628801E+01,2.577252E+01,2.526713E+01,2.477166E+01,
2.428590E+01,2.380967E+01,2.334278E+01,2.288504E+01,2.243628E+01,2.199632E+01,2.156498E+01,
2.114211E+01,2.072752E+01,2.032107E+01,1.992258E+01,1.953191E+01,1.914891E+01,1.877341E+01,
1.840527E+01,1.804436E+01,1.769052E+01,1.734362E+01,1.700352E+01,1.667009E+01,1.634320E+01,
1.602272E+01,1.570852E+01,1.540049E+01,1.509850E+01,1.480242E+01,1.451216E+01,1.422758E+01,
1.394859E+01,1.367506E+01,1.340690E+01,1.314400E+01,1.288626E+01,1.263357E+01,1.238583E+01,
1.214295E+01,1.190484E+01,1.167139E+01,1.144252E+01,1.121814E+01,1.099816E+01,1.078249E+01,
1.057105E+01,1.036376E+01,1.016053E+01,9.961292E+00,9.765957E+00,9.574453E+00,9.386704E+00,
9.202636E+00,9.022178E+00,8.845259E+00,8.671808E+00,8.501760E+00,8.335045E+00,8.171600E+00,
8.011360E+00,7.854262E+00,7.700245E+00,7.549248E+00,7.401212E+00,7.256078E+00,7.113791E+00,
6.974294E+00,6.837532E+00,6.703452E+00,6.572002E+00,6.443129E+00,6.316783E+00,6.192915E+00,
6.071476E+00,5.952418E+00,5.835694E+00,5.721260E+00,5.609069E+00,5.499079E+00,5.391245E+00,
5.285526E+00,5.181880E+00,5.080267E+00,4.980646E+00,4.882979E+00,4.787226E+00,4.693352E+00,
4.601318E+00,4.511089E+00,4.422629E+00,4.335904E+00,4.250880E+00,4.167523E+00,4.085800E+00,
4.005680E+00,3.927131E+00,3.850122E+00,3.774624E+00,3.700606E+00,3.628039E+00,3.556896E+00,
3.487147E+00,3.418766E+00,3.351726E+00,3.286001E+00,3.221564E+00,3.158392E+00,3.096457E+00,
3.035738E+00,2.976209E+00,2.917847E+00,2.860630E+00,2.804535E+00,2.749540E+00,2.695623E+00,
2.642763E+00,2.590940E+00,2.540133E+00,2.490323E+00,2.441489E+00,2.393613E+00,2.346676E+00,
2.300659E+00,2.255544E+00,2.211315E+00,2.167952E+00,2.125440E+00,2.083761E+00,2.042900E+00,
2.002840E+00,1.963566E+00,1.925061E+00,1.887312E+00,1.850303E+00,1.814020E+00,1.778448E+00,
1.743573E+00,1.709383E+00,1.675863E+00,1.643000E+00,1.610782E+00,1.579196E+00,1.548229E+00,
1.517869E+00,1.488104E+00,1.458924E+00,1.430315E+00,1.402267E+00,1.374770E+00,1.347811E+00,
1.321382E+00,1.295470E+00,1.270067E+00,1.245162E+00,1.220745E+00,1.196807E+00,1.173338E+00,
1.150330E+00,1.127772E+00]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# internally specified variables
a1 = pd.Series([.621, .621, .648], dtype='float')
b1 = pd.Series([.564, .564, .651], dtype='float')
# internally specified variables from external database
body_wgt = pd.Series([15., 1000., 20.], dtype='float')
frac_h2o = pd.Series([0.8, 0.8, 0.8], dtype='float')
# input variables that change per simulation
ted_empty.frac_retained_mamm = pd.Series([0.1, 0.1, 0.05], dtype='float')
# internally calculated variables
intake_food_conc = pd.Series([[3.000000E+02,2.941172E+02,2.883497E+02,2.826954E+02,2.771519E+02,
2.717171E+02,2.663889E+02,5.611652E+02,5.501611E+02,5.393727E+02,
5.287960E+02,5.184266E+02,5.082606E+02,4.982939E+02,7.885227E+02,
7.730602E+02,7.579010E+02,7.430390E+02,7.284684E+02,7.141836E+02,
7.001789E+02,9.864488E+02,9.671052E+02,9.481408E+02,9.295484E+02,
9.113205E+02,8.934501E+02,8.759300E+02,1.158754E+03,1.136031E+03,
1.113754E+03,1.091914E+03,1.070502E+03,1.049511E+03,1.028930E+03,
1.308754E+03,1.283090E+03,1.257929E+03,1.233262E+03,1.209078E+03,
1.185369E+03,1.162125E+03,1.439336E+03,1.411112E+03,1.383441E+03,
1.356312E+03,1.329716E+03,1.303641E+03,1.278077E+03,1.253015E+03,
1.228444E+03,1.204355E+03,1.180738E+03,1.157585E+03,1.134885E+03,
1.112631E+03,1.090813E+03,1.069423E+03,1.048452E+03,1.027892E+03,
1.007736E+03,9.879750E+02,9.686014E+02,9.496077E+02,9.309865E+02,
9.127304E+02,8.948323E+02,8.772852E+02,8.600822E+02,8.432165E+02,
8.266816E+02,8.104708E+02,7.945780E+02,7.789968E+02,7.637211E+02,
7.487450E+02,7.340626E+02,7.196681E+02,7.055558E+02,6.917203E+02,
6.781561E+02,6.648579E+02,6.518204E+02,6.390386E+02,6.265075E+02,
6.142220E+02,6.021775E+02,5.903692E+02,5.787924E+02,5.674426E+02,
5.563154E+02,5.454064E+02,5.347113E+02,5.242260E+02,5.139462E+02,
5.038680E+02,4.939875E+02,4.843007E+02,4.748039E+02,4.654933E+02,
4.563652E+02,4.474162E+02,4.386426E+02,4.300411E+02,4.216083E+02,
4.133408E+02,4.052354E+02,3.972890E+02,3.894984E+02,3.818606E+02,
3.743725E+02,3.670313E+02,3.598340E+02,3.527779E+02,3.458602E+02,
3.390781E+02,3.324289E+02,3.259102E+02,3.195193E+02,3.132537E+02,
3.071110E+02,3.010888E+02,2.951846E+02,2.893962E+02,2.837213E+02,
2.781577E+02,2.727032E+02,2.673557E+02,2.621130E+02,2.569731E+02,
2.519340E+02,2.469938E+02,2.421504E+02,2.374019E+02,2.327466E+02,
2.281826E+02,2.237081E+02,2.193213E+02,2.150205E+02,2.108041E+02,
2.066704E+02,2.026177E+02,1.986445E+02,1.947492E+02,1.909303E+02,
1.871863E+02,1.835157E+02,1.799170E+02,1.763890E+02,1.729301E+02,
1.695390E+02,1.662145E+02,1.629551E+02,1.597597E+02,1.566269E+02,
1.535555E+02,1.505444E+02,1.475923E+02,1.446981E+02,1.418607E+02,
1.390789E+02,1.363516E+02,1.336778E+02,1.310565E+02,1.284866E+02,
1.259670E+02,1.234969E+02,1.210752E+02,1.187010E+02,1.163733E+02,
1.140913E+02,1.118540E+02,1.096607E+02,1.075103E+02,1.054021E+02,
1.033352E+02,1.013089E+02,9.932225E+01,9.737460E+01,9.546514E+01,
9.359313E+01,9.175783E+01,8.995851E+01,8.819448E+01,8.646504E+01,
8.476951E+01,8.310723E+01,8.147755E+01,7.987983E+01,7.831343E+01,
7.677775E+01,7.527219E+01,7.379615E+01,7.234905E+01,7.093033E+01,
6.953943E+01,6.817580E+01,6.683892E+01,6.552825E+01,6.424328E+01,
6.298351E+01,6.174844E+01,6.053759E+01,5.935048E+01,5.818666E+01,
5.704565E+01,5.592702E+01,5.483033E+01,5.375514E+01,5.270103E+01,
5.166760E+01,5.065443E+01,4.966112E+01,4.868730E+01,4.773257E+01,
4.679657E+01,4.587891E+01,4.497926E+01,4.409724E+01,4.323252E+01,
4.238476E+01,4.155362E+01,4.073878E+01,3.993991E+01,3.915672E+01,
3.838888E+01,3.763609E+01,3.689807E+01,3.617452E+01,3.546516E+01,
3.476971E+01,3.408790E+01,3.341946E+01,3.276412E+01,3.212164E+01,
3.149175E+01,3.087422E+01,3.026879E+01,2.967524E+01,2.909333E+01,
2.852283E+01,2.796351E+01,2.741516E+01,2.687757E+01,2.635052E+01,
2.583380E+01,2.532721E+01,2.483056E+01,2.434365E+01,2.386629E+01,
2.339828E+01,2.293946E+01,2.248963E+01,2.204862E+01,2.161626E+01,
2.119238E+01,2.077681E+01,2.036939E+01,1.996996E+01,1.957836E+01,
1.919444E+01,1.881805E+01,1.844904E+01,1.808726E+01,1.773258E+01,
1.738486E+01,1.704395E+01,1.670973E+01,1.638206E+01,1.606082E+01,
1.574588E+01,1.543711E+01,1.513440E+01,1.483762E+01,1.454666E+01,
1.426141E+01,1.398176E+01,1.370758E+01,1.343878E+01,1.317526E+01,
1.291690E+01,1.266361E+01,1.241528E+01,1.217183E+01,1.193314E+01,
1.169914E+01,1.146973E+01,1.124481E+01,1.102431E+01,1.080813E+01,
1.059619E+01,1.038840E+01,1.018469E+01,9.984978E+00,9.789179E+00,
9.597219E+00,9.409024E+00,9.224518E+00,9.043631E+00,8.866291E+00,
8.692429E+00,8.521975E+00,8.354865E+00,8.191031E+00,8.030410E+00,
7.872938E+00,7.718555E+00,7.567199E+00,7.418810E+00,7.273332E+00,
7.130706E+00,6.990878E+00,6.853791E+00,6.719392E+00,6.587629E+00,
6.458450E+00,6.331803E+00,6.207641E+00,6.085913E+00,5.966571E+00,
5.849571E+00,5.734864E+00,5.622407E+00,5.512155E+00,5.404065E+00,
5.298095E+00,5.194202E+00,5.092347E+00,4.992489E+00,4.894590E+00,
4.798610E+00,4.704512E+00,4.612259E+00,4.521816E+00,4.433146E+00,
4.346214E+00,4.260988E+00,4.177432E+00,4.095515E+00,4.015205E+00,
3.936469E+00,3.859277E+00,3.783599E+00,3.709405E+00,3.636666E+00,
3.565353E+00,3.495439E+00,3.426895E+00,3.359696E+00,3.293814E+00,
3.229225E+00,3.165902E+00,3.103820E+00,3.042956E+00,2.983286E+00,
2.924785E+00,2.867432E+00,2.811203E+00,2.756077E+00,2.702032E+00,
2.649047E+00,2.597101E+00,2.546174E+00,2.496245E+00,2.447295E+00,
2.399305E+00],
[3.000000E+02,2.941172E+02,2.883497E+02,2.826954E+02,2.771519E+02,
2.717171E+02,2.663889E+02,5.611652E+02,5.501611E+02,5.393727E+02,
5.287960E+02,5.184266E+02,5.082606E+02,4.982939E+02,7.885227E+02,
7.730602E+02,7.579010E+02,7.430390E+02,7.284684E+02,7.141836E+02,
7.001789E+02,9.864488E+02,9.671052E+02,9.481408E+02,9.295484E+02,
9.113205E+02,8.934501E+02,8.759300E+02,1.158754E+03,1.136031E+03,
1.113754E+03,1.091914E+03,1.070502E+03,1.049511E+03,1.028930E+03,
1.308754E+03,1.283090E+03,1.257929E+03,1.233262E+03,1.209078E+03,
1.185369E+03,1.162125E+03,1.439336E+03,1.411112E+03,1.383441E+03,
1.356312E+03,1.329716E+03,1.303641E+03,1.278077E+03,1.253015E+03,
1.228444E+03,1.204355E+03,1.180738E+03,1.157585E+03,1.134885E+03,
1.112631E+03,1.090813E+03,1.069423E+03,1.048452E+03,1.027892E+03,
1.007736E+03,9.879750E+02,9.686014E+02,9.496077E+02,9.309865E+02,
9.127304E+02,8.948323E+02,8.772852E+02,8.600822E+02,8.432165E+02,
8.266816E+02,8.104708E+02,7.945780E+02,7.789968E+02,7.637211E+02,
7.487450E+02,7.340626E+02,7.196681E+02,7.055558E+02,6.917203E+02,
6.781561E+02,6.648579E+02,6.518204E+02,6.390386E+02,6.265075E+02,
6.142220E+02,6.021775E+02,5.903692E+02,5.787924E+02,5.674426E+02,
5.563154E+02,5.454064E+02,5.347113E+02,5.242260E+02,5.139462E+02,
5.038680E+02,4.939875E+02,4.843007E+02,4.748039E+02,4.654933E+02,
4.563652E+02,4.474162E+02,4.386426E+02,4.300411E+02,4.216083E+02,
4.133408E+02,4.052354E+02,3.972890E+02,3.894984E+02,3.818606E+02,
3.743725E+02,3.670313E+02,3.598340E+02,3.527779E+02,3.458602E+02,
3.390781E+02,3.324289E+02,3.259102E+02,3.195193E+02,3.132537E+02,
3.071110E+02,3.010888E+02,2.951846E+02,2.893962E+02,2.837213E+02,
2.781577E+02,2.727032E+02,2.673557E+02,2.621130E+02,2.569731E+02,
2.519340E+02,2.469938E+02,2.421504E+02,2.374019E+02,2.327466E+02,
2.281826E+02,2.237081E+02,2.193213E+02,2.150205E+02,2.108041E+02,
2.066704E+02,2.026177E+02,1.986445E+02,1.947492E+02,1.909303E+02,
1.871863E+02,1.835157E+02,1.799170E+02,1.763890E+02,1.729301E+02,
1.695390E+02,1.662145E+02,1.629551E+02,1.597597E+02,1.566269E+02,
1.535555E+02,1.505444E+02,1.475923E+02,1.446981E+02,1.418607E+02,
1.390789E+02,1.363516E+02,1.336778E+02,1.310565E+02,1.284866E+02,
1.259670E+02,1.234969E+02,1.210752E+02,1.187010E+02,1.163733E+02,
1.140913E+02,1.118540E+02,1.096607E+02,1.075103E+02,1.054021E+02,
1.033352E+02,1.013089E+02,9.932225E+01,9.737460E+01,9.546514E+01,
9.359313E+01,9.175783E+01,8.995851E+01,8.819448E+01,8.646504E+01,
8.476951E+01,8.310723E+01,8.147755E+01,7.987983E+01,7.831343E+01,
7.677775E+01,7.527219E+01,7.379615E+01,7.234905E+01,7.093033E+01,
6.953943E+01,6.817580E+01,6.683892E+01,6.552825E+01,6.424328E+01,
6.298351E+01,6.174844E+01,6.053759E+01,5.935048E+01,5.818666E+01,
5.704565E+01,5.592702E+01,5.483033E+01,5.375514E+01,5.270103E+01,
5.166760E+01,5.065443E+01,4.966112E+01,4.868730E+01,4.773257E+01,
4.679657E+01,4.587891E+01,4.497926E+01,4.409724E+01,4.323252E+01,
4.238476E+01,4.155362E+01,4.073878E+01,3.993991E+01,3.915672E+01,
3.838888E+01,3.763609E+01,3.689807E+01,3.617452E+01,3.546516E+01,
3.476971E+01,3.408790E+01,3.341946E+01,3.276412E+01,3.212164E+01,
3.149175E+01,3.087422E+01,3.026879E+01,2.967524E+01,2.909333E+01,
2.852283E+01,2.796351E+01,2.741516E+01,2.687757E+01,2.635052E+01,
2.583380E+01,2.532721E+01,2.483056E+01,2.434365E+01,2.386629E+01,
2.339828E+01,2.293946E+01,2.248963E+01,2.204862E+01,2.161626E+01,
2.119238E+01,2.077681E+01,2.036939E+01,1.996996E+01,1.957836E+01,
1.919444E+01,1.881805E+01,1.844904E+01,1.808726E+01,1.773258E+01,
1.738486E+01,1.704395E+01,1.670973E+01,1.638206E+01,1.606082E+01,
1.574588E+01,1.543711E+01,1.513440E+01,1.483762E+01,1.454666E+01,
1.426141E+01,1.398176E+01,1.370758E+01,1.343878E+01,1.317526E+01,
1.291690E+01,1.266361E+01,1.241528E+01,1.217183E+01,1.193314E+01,
1.169914E+01,1.146973E+01,1.124481E+01,1.102431E+01,1.080813E+01,
1.059619E+01,1.038840E+01,1.018469E+01,9.984978E+00,9.789179E+00,
9.597219E+00,9.409024E+00,9.224518E+00,9.043631E+00,8.866291E+00,
8.692429E+00,8.521975E+00,8.354865E+00,8.191031E+00,8.030410E+00,
7.872938E+00,7.718555E+00,7.567199E+00,7.418810E+00,7.273332E+00,
7.130706E+00,6.990878E+00,6.853791E+00,6.719392E+00,6.587629E+00,
6.458450E+00,6.331803E+00,6.207641E+00,6.085913E+00,5.966571E+00,
5.849571E+00,5.734864E+00,5.622407E+00,5.512155E+00,5.404065E+00,
5.298095E+00,5.194202E+00,5.092347E+00,4.992489E+00,4.894590E+00,
4.798610E+00,4.704512E+00,4.612259E+00,4.521816E+00,4.433146E+00,
4.346214E+00,4.260988E+00,4.177432E+00,4.095515E+00,4.015205E+00,
3.936469E+00,3.859277E+00,3.783599E+00,3.709405E+00,3.636666E+00,
3.565353E+00,3.495439E+00,3.426895E+00,3.359696E+00,3.293814E+00,
3.229225E+00,3.165902E+00,3.103820E+00,3.042956E+00,2.983286E+00,
2.924785E+00,2.867432E+00,2.811203E+00,2.756077E+00,2.702032E+00,
2.649047E+00,2.597101E+00,2.546174E+00,2.496245E+00,2.447295E+00,
2.399305E+00],
[1.175000E+02,1.151959E+02,1.129370E+02,1.107224E+02,
1.085512E+02,1.064225E+02,1.043356E+02,2.197897E+02,2.154797E+02,
2.112543E+02,2.071118E+02,2.030504E+02,1.990687E+02,1.951651E+02,
3.088380E+02,3.027819E+02,2.968445E+02,2.910236E+02,2.853168E+02,
2.797219E+02,2.742367E+02,3.863591E+02,3.787829E+02,3.713552E+02,
3.640731E+02,3.569339E+02,3.499346E+02,3.430726E+02,4.538452E+02,
4.449455E+02,4.362204E+02,4.276664E+02,4.192801E+02,4.110583E+02,
4.029977E+02,5.125952E+02,5.025435E+02,4.926889E+02,4.830276E+02,
4.735557E+02,4.642696E+02,4.551655E+02,5.637400E+02,5.526854E+02,
5.418476E+02,5.312223E+02,5.208053E+02,5.105927E+02,5.005803E+02,
4.907642E+02,4.811406E+02,4.717057E+02,4.624559E+02,4.533874E+02,
4.444967E+02,4.357804E+02,4.272350E+02,4.188572E+02,4.106437E+02,
4.025912E+02,3.946966E+02,3.869569E+02,3.793689E+02,3.719297E+02,
3.646364E+02,3.574861E+02,3.504760E+02,3.436034E+02,3.368655E+02,
3.302598E+02,3.237836E+02,3.174344E+02,3.112097E+02,3.051071E+02,
2.991241E+02,2.932585E+02,2.875079E+02,2.818700E+02,2.763427E+02,
2.709238E+02,2.656111E+02,2.604027E+02,2.552963E+02,2.502901E+02,
2.453821E+02,2.405703E+02,2.358529E+02,2.312279E+02,2.266937E+02,
2.222484E+02,2.178902E+02,2.136175E+02,2.094286E+02,2.053218E+02,
2.012956E+02,1.973483E+02,1.934784E+02,1.896844E+02,1.859648E+02,
1.823182E+02,1.787430E+02,1.752380E+02,1.718017E+02,1.684328E+02,
1.651299E+02,1.618918E+02,1.587172E+02,1.556049E+02,1.525535E+02,
1.495621E+02,1.466292E+02,1.437539E+02,1.409350E+02,1.381714E+02,
1.354619E+02,1.328056E+02,1.302013E+02,1.276482E+02,1.251451E+02,
1.226910E+02,1.202851E+02,1.179264E+02,1.156140E+02,1.133468E+02,
1.111242E+02,1.089451E+02,1.068088E+02,1.047143E+02,1.026609E+02,
1.006478E+02,9.867416E+01,9.673922E+01,9.484222E+01,9.298242E+01,
9.115910E+01,8.937152E+01,8.761900E+01,8.590085E+01,8.421638E+01,
8.256495E+01,8.094590E+01,7.935860E+01,7.780243E+01,7.627677E+01,
7.478103E+01,7.331462E+01,7.187696E+01,7.046750E+01,6.908568E+01,
6.773095E+01,6.640279E+01,6.510067E+01,6.382408E+01,6.257253E+01,
6.134552E+01,6.014257E+01,5.896321E+01,5.780698E+01,5.667342E+01,
5.556209E+01,5.447255E+01,5.340438E+01,5.235715E+01,5.133046E+01,
5.032390E+01,4.933708E+01,4.836961E+01,4.742111E+01,4.649121E+01,
4.557955E+01,4.468576E+01,4.380950E+01,4.295042E+01,4.210819E+01,
4.128248E+01,4.047295E+01,3.967930E+01,3.890121E+01,3.813839E+01,
3.739051E+01,3.665731E+01,3.593848E+01,3.523375E+01,3.454284E+01,
3.386547E+01,3.320139E+01,3.255033E+01,3.191204E+01,3.128627E+01,
3.067276E+01,3.007129E+01,2.948161E+01,2.890349E+01,2.833671E+01,
2.778105E+01,2.723628E+01,2.670219E+01,2.617858E+01,2.566523E+01,
2.516195E+01,2.466854E+01,2.418480E+01,2.371056E+01,2.324561E+01,
2.278977E+01,2.234288E+01,2.190475E+01,2.147521E+01,2.105410E+01,
2.064124E+01,2.023648E+01,1.983965E+01,1.945061E+01,1.906919E+01,
1.869526E+01,1.832865E+01,1.796924E+01,1.761688E+01,1.727142E+01,
1.693274E+01,1.660070E+01,1.627517E+01,1.595602E+01,1.564313E+01,
1.533638E+01,1.503564E+01,1.474080E+01,1.445175E+01,1.416836E+01,
1.389052E+01,1.361814E+01,1.335109E+01,1.308929E+01,1.283261E+01,
1.258098E+01,1.233427E+01,1.209240E+01,1.185528E+01,1.162280E+01,
1.139489E+01,1.117144E+01,1.095238E+01,1.073761E+01,1.052705E+01,
1.032062E+01,1.011824E+01,9.919825E+00,9.725304E+00,9.534596E+00,
9.347629E+00,9.164327E+00,8.984620E+00,8.808438E+00,8.635709E+00,
8.466368E+00,8.300348E+00,8.137583E+00,7.978010E+00,7.821566E+00,
7.668190E+00,7.517822E+00,7.370402E+00,7.225873E+00,7.084178E+00,
6.945261E+00,6.809069E+00,6.675547E+00,6.544644E+00,6.416307E+00,
6.290488E+00,6.167135E+00,6.046201E+00,5.927639E+00,5.811402E+00,
5.697443E+00,5.585720E+00,5.476188E+00,5.368803E+00,5.263524E+00,
5.160309E+00,5.059119E+00,4.959913E+00,4.862652E+00,4.767298E+00,
4.673814E+00,4.582164E+00,4.492310E+00,4.404219E+00,4.317855E+00,
4.233184E+00,4.150174E+00,4.068792E+00,3.989005E+00,3.910783E+00,
3.834095E+00,3.758911E+00,3.685201E+00,3.612936E+00,3.542089E+00,
3.472631E+00,3.404535E+00,3.337774E+00,3.272322E+00,3.208154E+00,
3.145244E+00,3.083567E+00,3.023101E+00,2.963819E+00,2.905701E+00,
2.848722E+00,2.792860E+00,2.738094E+00,2.684401E+00,2.631762E+00,
2.580155E+00,2.529559E+00,2.479956E+00,2.431326E+00,2.383649E+00,
2.336907E+00,2.291082E+00,2.246155E+00,2.202109E+00,2.158927E+00,
2.116592E+00,2.075087E+00,2.034396E+00,1.994503E+00,1.955392E+00,
1.917048E+00,1.879455E+00,1.842600E+00,1.806468E+00,1.771044E+00,
1.736315E+00,1.702267E+00,1.668887E+00,1.636161E+00,1.604077E+00,
1.572622E+00,1.541784E+00,1.511550E+00,1.481910E+00,1.452850E+00,
1.424361E+00,1.396430E+00,1.369047E+00,1.342201E+00,1.315881E+00,
1.290077E+00,1.264780E+00,1.239978E+00,1.215663E+00,1.191825E+00,
1.168454E+00,1.145541E+00,1.123078E+00,1.101055E+00,1.079464E+00,
1.058296E+00,1.037544E+00,1.017198E+00,9.972513E-01,9.776958E-01,
9.585238E-01,9.397277E-01]], dtype='float')
for i in range(3):
result[i] = ted_empty.daily_animal_dose_timeseries(a1[i], b1[i], body_wgt[i], frac_h2o[i], intake_food_conc[i], ted_empty.frac_retained_mamm[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
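    # --- Illustrative sketch (not part of the model under test; assumptions noted below) ---
    # A minimal reconstruction of the dose recursion that is consistent with the expected values above,
    # assuming the allometric intake relation described in the docstring (a1 * body_wgt**b1 grams of dry
    # food per day, converted to wet weight via frac_h2o); the actual TED implementation may differ.
    @staticmethod
    def _sketch_daily_animal_dose(a1, b1, body_wgt, frac_h2o, food_conc, frac_retained):
        intake_wet = a1 * body_wgt ** b1 / (1. - frac_h2o)  # g wet food ingested per day
        dose = [food_conc[0] * intake_wet / body_wgt]       # day 0 dose (mg a.i./kg-bw)
        for conc in food_conc[1:]:
            # today's intake-based dose plus the retained fraction of yesterday's dose
            dose.append(conc * intake_wet / body_wgt + frac_retained * dose[-1])
        return dose
    # e.g. a1=0.621, b1=0.564, body_wgt=15., frac_h2o=0.8, frac_retained=0.1 and food_conc[0]=300.
    # reproduce the first expected value above (~2.860270E+02 mg a.i./kg-bw).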
def test_daily_canopy_air_timeseries(self):
"""
        :description generates annual timeseries of daily pesticide concentrations in canopy air
        :param i; simulation number/index
        :param application rate; active ingredient application rate (lbs a.i./acre)
        :param daily_flag; daily flag denoting if pesticide is applied (False - not applied, True - applied)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
               # note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar-specific
               # association, rather it is one year from the day of 1st pesticide application
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [[2.697542E-06,2.575726E-06,2.459410E-06,5.045889E-06,4.818025E-06,4.600451E-06,
7.090244E-06,6.770060E-06,6.464335E-06,6.172416E-06,5.893680E-06,5.627531E-06,
5.373400E-06,5.130746E-06,4.899050E-06,4.677817E-06,4.466574E-06,4.264871E-06,
4.072276E-06,3.888378E-06,3.712786E-06,3.545122E-06,3.385030E-06,3.232168E-06,
3.086208E-06,2.946840E-06,2.813765E-06,2.686700E-06,2.565373E-06,2.449525E-06,
2.338908E-06,2.233287E-06,2.132435E-06,2.036138E-06,1.944189E-06,1.856393E-06,
1.772561E-06,1.692515E-06,1.616084E-06,1.543104E-06,1.473420E-06,1.406883E-06,
1.343350E-06,1.282687E-06,1.224762E-06,1.169454E-06,1.116643E-06,1.066218E-06,
1.018069E-06,9.720946E-07,9.281964E-07,8.862805E-07,8.462575E-07,8.080419E-07,
7.715520E-07,7.367100E-07,7.034413E-07,6.716750E-07,6.413433E-07,6.123812E-07,
5.847271E-07,5.583217E-07,5.331088E-07,5.090345E-07,4.860473E-07,4.640982E-07,
4.431403E-07,4.231288E-07,4.040209E-07,3.857760E-07,3.683550E-07,3.517207E-07,
3.358375E-07,3.206716E-07,3.061906E-07,2.923635E-07,2.791609E-07,2.665544E-07,
2.545172E-07,2.430237E-07,2.320491E-07,2.215701E-07,2.115644E-07,2.020105E-07,
1.928880E-07,1.841775E-07,1.758603E-07,1.679188E-07,1.603358E-07,1.530953E-07,
1.461818E-07,1.395804E-07,1.332772E-07,1.272586E-07,1.215118E-07,1.160245E-07,
1.107851E-07,1.057822E-07,1.010052E-07,9.644400E-08,9.208875E-08,8.793017E-08,
8.395938E-08,8.016791E-08,7.654765E-08,7.309089E-08,6.979022E-08,6.663860E-08,
6.362931E-08,6.075591E-08,5.801227E-08,5.539253E-08,5.289110E-08,5.050262E-08,
4.822200E-08,4.604437E-08,4.396508E-08,4.197969E-08,4.008395E-08,3.827383E-08,
3.654544E-08,3.489511E-08,3.331930E-08,3.181466E-08,3.037796E-08,2.900614E-08,
2.769627E-08,2.644555E-08,2.525131E-08,2.411100E-08,2.302219E-08,2.198254E-08,
2.098985E-08,2.004198E-08,1.913691E-08,1.827272E-08,1.744755E-08,1.665965E-08,
1.590733E-08,1.518898E-08,1.450307E-08,1.384813E-08,1.322277E-08,1.262565E-08,
1.205550E-08,1.151109E-08,1.099127E-08,1.049492E-08,1.002099E-08,9.568457E-09,
9.136361E-09,8.723777E-09,8.329826E-09,7.953664E-09,7.594489E-09,7.251534E-09,
6.924067E-09,6.611387E-09,6.312827E-09,6.027750E-09,5.755547E-09,5.495635E-09,
5.247461E-09,5.010494E-09,4.784228E-09,4.568180E-09,4.361889E-09,4.164913E-09,
3.976832E-09,3.797245E-09,3.625767E-09,3.462033E-09,3.305693E-09,3.156414E-09,
3.013875E-09,2.877773E-09,2.747818E-09,2.623731E-09,2.505247E-09,2.392114E-09,
2.284090E-09,2.180944E-09,2.082456E-09,1.988416E-09,1.898622E-09,1.812884E-09,
1.731017E-09,1.652847E-09,1.578207E-09,1.506938E-09,1.438887E-09,1.373909E-09,
1.311865E-09,1.252624E-09,1.196057E-09,1.142045E-09,1.090472E-09,1.041228E-09,
9.942080E-10,9.493112E-10,9.064418E-10,8.655083E-10,8.264234E-10,7.891034E-10,
7.534688E-10,7.194433E-10,6.869544E-10,6.559327E-10,6.263118E-10,5.980286E-10,
5.710225E-10,5.452361E-10,5.206141E-10,4.971040E-10,4.746556E-10,4.532209E-10,
4.327542E-10,4.132117E-10,3.945517E-10,3.767344E-10,3.597217E-10,3.434772E-10,
3.279663E-10,3.131559E-10,2.990143E-10,2.855113E-10,2.726180E-10,2.603070E-10,
2.485520E-10,2.373278E-10,2.266104E-10,2.163771E-10,2.066058E-10,1.972759E-10,
1.883672E-10,1.798608E-10,1.717386E-10,1.639832E-10,1.565779E-10,1.495071E-10,
1.427556E-10,1.363090E-10,1.301535E-10,1.242760E-10,1.186639E-10,1.133052E-10,
1.081885E-10,1.033029E-10,9.863793E-11,9.418360E-11,8.993042E-11,8.586930E-11,
8.199158E-11,7.828897E-11,7.475357E-11,7.137782E-11,6.815451E-11,6.507676E-11,
6.213800E-11,5.933195E-11,5.665261E-11,5.409427E-11,5.165146E-11,4.931896E-11,
4.709180E-11,4.496521E-11,4.293465E-11,4.099579E-11,3.914449E-11,3.737678E-11,
3.568891E-11,3.407726E-11,3.253838E-11,3.106900E-11,2.966597E-11,2.832631E-11,
2.704714E-11,2.582573E-11,2.465948E-11,2.354590E-11,2.248260E-11,2.146733E-11,
2.049790E-11,1.957224E-11,1.868839E-11,1.784445E-11,1.703863E-11,1.626919E-11,
1.553450E-11,1.483299E-11,1.416315E-11,1.352357E-11,1.291287E-11,1.232974E-11,
1.177295E-11,1.124130E-11,1.073366E-11,1.024895E-11,9.786122E-12,9.344196E-12,
8.922227E-12,8.519314E-12,8.134595E-12,7.767250E-12,7.416493E-12,7.081576E-12,
6.761784E-12,6.456433E-12,6.164870E-12,5.886475E-12,5.620651E-12,5.366831E-12,
5.124474E-12,4.893061E-12,4.672098E-12,4.461114E-12,4.259657E-12,4.067298E-12,
3.883625E-12,3.708247E-12,3.540788E-12,3.380892E-12,3.228216E-12,3.082435E-12,
2.943237E-12,2.810325E-12,2.683416E-12,2.562237E-12,2.446530E-12,2.336049E-12,
2.230557E-12,2.129828E-12,2.033649E-12,1.941812E-12,1.854123E-12,1.770394E-12,
1.690446E-12,1.614108E-12,1.541218E-12,1.471619E-12,1.405163E-12,1.341708E-12,
1.281118E-12,1.223265E-12,1.168025E-12,1.115278E-12,1.064914E-12,1.016824E-12,
9.709062E-13,9.270617E-13,8.851971E-13,8.452230E-13,8.070541E-13,7.706088E-13,
7.358093E-13,7.025814E-13,6.708539E-13,6.405592E-13,6.116326E-13,5.840123E-13,
5.576392E-13,5.324571E-13,5.084122E-13,4.854531E-13,4.635308E-13,4.425985E-13],
[1.747062E-05,1.699289E-05,1.652822E-05,1.607625E-05,1.563665E-05,1.520906E-05,
1.479317E-05,3.185927E-05,3.098808E-05,3.014071E-05,2.931651E-05,2.851485E-05,
2.773511E-05,2.697669E-05,4.370963E-05,4.251439E-05,4.135183E-05,4.022106E-05,
3.912122E-05,3.805144E-05,3.701093E-05,5.346948E-05,5.200736E-05,5.058521E-05,
4.920196E-05,4.785653E-05,4.654789E-05,4.527503E-05,6.150761E-05,5.982568E-05,
5.818974E-05,5.659854E-05,5.505085E-05,5.354548E-05,5.208128E-05,5.065711E-05,
4.927189E-05,4.792455E-05,4.661405E-05,4.533939E-05,4.409958E-05,4.289367E-05,
4.172074E-05,4.057989E-05,3.947023E-05,3.839091E-05,3.734111E-05,3.632002E-05,
3.532684E-05,3.436083E-05,3.342123E-05,3.250733E-05,3.161841E-05,3.075380E-05,
2.991284E-05,2.909487E-05,2.829927E-05,2.752543E-05,2.677274E-05,2.604064E-05,
2.532856E-05,2.463595E-05,2.396227E-05,2.330703E-05,2.266969E-05,2.204979E-05,
2.144684E-05,2.086037E-05,2.028994E-05,1.973511E-05,1.919546E-05,1.867056E-05,
1.816001E-05,1.766342E-05,1.718041E-05,1.671062E-05,1.625366E-05,1.580921E-05,
1.537690E-05,1.495642E-05,1.454744E-05,1.414964E-05,1.376271E-05,1.338637E-05,
1.302032E-05,1.266428E-05,1.231797E-05,1.198114E-05,1.165351E-05,1.133485E-05,
1.102489E-05,1.072342E-05,1.043019E-05,1.014497E-05,9.867557E-06,9.597728E-06,
9.335278E-06,9.080004E-06,8.831711E-06,8.590207E-06,8.355308E-06,8.126831E-06,
7.904603E-06,7.688451E-06,7.478210E-06,7.273718E-06,7.074818E-06,6.881356E-06,
6.693185E-06,6.510160E-06,6.332139E-06,6.158987E-06,5.990569E-06,5.826756E-06,
5.667423E-06,5.512447E-06,5.361709E-06,5.215093E-06,5.072486E-06,4.933779E-06,
4.798864E-06,4.667639E-06,4.540002E-06,4.415856E-06,4.295104E-06,4.177654E-06,
4.063416E-06,3.952301E-06,3.844226E-06,3.739105E-06,3.636859E-06,3.537409E-06,
3.440678E-06,3.346593E-06,3.255080E-06,3.166070E-06,3.079493E-06,2.995284E-06,
2.913378E-06,2.833712E-06,2.756224E-06,2.680855E-06,2.607546E-06,2.536243E-06,
2.466889E-06,2.399432E-06,2.333819E-06,2.270001E-06,2.207928E-06,2.147552E-06,
2.088827E-06,2.031708E-06,1.976151E-06,1.922113E-06,1.869552E-06,1.818429E-06,
1.768704E-06,1.720339E-06,1.673296E-06,1.627540E-06,1.583035E-06,1.539747E-06,
1.497642E-06,1.456689E-06,1.416856E-06,1.378112E-06,1.340427E-06,1.303773E-06,
1.268121E-06,1.233445E-06,1.199716E-06,1.166910E-06,1.135001E-06,1.103964E-06,
1.073776E-06,1.044413E-06,1.015854E-06,9.880754E-07,9.610564E-07,9.347762E-07,
9.092147E-07,8.843522E-07,8.601696E-07,8.366482E-07,8.137700E-07,7.915174E-07,
7.698733E-07,7.488211E-07,7.283445E-07,7.084279E-07,6.890559E-07,6.702136E-07,
6.518866E-07,6.340607E-07,6.167223E-07,5.998580E-07,5.834549E-07,5.675003E-07,
5.519819E-07,5.368880E-07,5.222067E-07,5.079270E-07,4.940377E-07,4.805282E-07,
4.673881E-07,4.546074E-07,4.421761E-07,4.300848E-07,4.183241E-07,4.068850E-07,
3.957587E-07,3.849367E-07,3.744105E-07,3.641723E-07,3.542140E-07,3.445280E-07,
3.351068E-07,3.259433E-07,3.170304E-07,3.083612E-07,2.999290E-07,2.917274E-07,
2.837501E-07,2.759910E-07,2.684440E-07,2.611034E-07,2.539635E-07,2.470188E-07,
2.402641E-07,2.336941E-07,2.273037E-07,2.210881E-07,2.150424E-07,2.091620E-07,
2.034425E-07,1.978794E-07,1.924683E-07,1.872053E-07,1.820861E-07,1.771070E-07,
1.722640E-07,1.675534E-07,1.629717E-07,1.585152E-07,1.541806E-07,1.499645E-07,
1.458637E-07,1.418751E-07,1.379955E-07,1.342220E-07,1.305517E-07,1.269817E-07,
1.235094E-07,1.201320E-07,1.168470E-07,1.136518E-07,1.105440E-07,1.075212E-07,
1.045810E-07,1.017212E-07,9.893968E-08,9.623416E-08,9.360264E-08,9.104307E-08,
8.855349E-08,8.613199E-08,8.377671E-08,8.148583E-08,7.925759E-08,7.709029E-08,
7.498225E-08,7.293186E-08,7.093753E-08,6.899774E-08,6.711100E-08,6.527584E-08,
6.349087E-08,6.175471E-08,6.006602E-08,5.842352E-08,5.682592E-08,5.527201E-08,
5.376060E-08,5.229051E-08,5.086062E-08,4.946984E-08,4.811708E-08,4.680132E-08,
4.552153E-08,4.427674E-08,4.306599E-08,4.188835E-08,4.074291E-08,3.962880E-08,
3.854515E-08,3.749113E-08,3.646593E-08,3.546877E-08,3.449887E-08,3.355550E-08,
3.263792E-08,3.174544E-08,3.087735E-08,3.003301E-08,2.921176E-08,2.841296E-08,
2.763601E-08,2.688030E-08,2.614526E-08,2.543031E-08,2.473492E-08,2.405854E-08,
2.340066E-08,2.276077E-08,2.213837E-08,2.153300E-08,2.094418E-08,2.037146E-08,
1.981440E-08,1.927257E-08,1.874556E-08,1.823296E-08,1.773438E-08,1.724944E-08,
1.677775E-08,1.631896E-08,1.587272E-08,1.543868E-08,1.501651E-08,1.460588E-08,
1.420648E-08,1.381800E-08,1.344015E-08,1.307263E-08,1.271516E-08,1.236746E-08,
1.202927E-08,1.170033E-08,1.138038E-08,1.106919E-08,1.076650E-08,1.047209E-08,
1.018573E-08,9.907199E-09,9.636286E-09,9.372782E-09,9.116482E-09,8.867192E-09,
8.624718E-09,8.388874E-09,8.159480E-09,7.936359E-09,7.719339E-09,7.508253E-09,
7.302939E-09,7.103240E-09,6.909002E-09,6.720075E-09,6.536314E-09,6.357578E-09,
6.183730E-09,6.014635E-09,5.850165E-09,5.690192E-09,5.534593E-09,5.383249E-09],
[1.133578E-07,1.111350E-07,1.089557E-07,1.068191E-07,1.047245E-07,1.026709E-07,
1.006576E-07,9.868374E-08,9.674861E-08,9.485143E-08,9.299145E-08,9.116795E-08,
8.938020E-08,8.762751E-08,8.590918E-08,8.422456E-08,8.257297E-08,8.095376E-08,
7.936631E-08,7.780998E-08,7.628418E-08,7.478829E-08,7.332174E-08,7.188394E-08,
7.047434E-08,6.909238E-08,6.773752E-08,6.640923E-08,6.510699E-08,6.383028E-08,
6.257861E-08,6.135148E-08,6.014841E-08,5.896894E-08,5.781259E-08,5.667892E-08,
5.556749E-08,5.447784E-08,5.340956E-08,5.236223E-08,5.133544E-08,5.032879E-08,
4.934187E-08,4.837431E-08,4.742571E-08,4.649573E-08,4.558397E-08,4.469010E-08,
4.381375E-08,4.295459E-08,4.211228E-08,4.128648E-08,4.047688E-08,3.968315E-08,
3.890499E-08,3.814209E-08,3.739414E-08,3.666087E-08,3.594197E-08,3.523717E-08,
3.454619E-08,3.386876E-08,3.320462E-08,3.255349E-08,3.191514E-08,3.128930E-08,
3.067574E-08,3.007421E-08,2.948447E-08,2.890630E-08,2.833946E-08,2.778374E-08,
2.723892E-08,2.670478E-08,2.618112E-08,2.566772E-08,2.516439E-08,2.467093E-08,
2.418715E-08,2.371286E-08,2.324786E-08,2.279199E-08,2.234505E-08,2.190688E-08,
2.147730E-08,2.105614E-08,2.064324E-08,2.023844E-08,1.984158E-08,1.945250E-08,
1.907104E-08,1.869707E-08,1.833043E-08,1.797099E-08,1.761859E-08,1.727310E-08,
1.693438E-08,1.660231E-08,1.627675E-08,1.595757E-08,1.564465E-08,1.533787E-08,
1.503710E-08,1.474223E-08,1.445315E-08,1.416973E-08,1.389187E-08,1.361946E-08,
1.335239E-08,1.309056E-08,1.283386E-08,1.258220E-08,1.233547E-08,1.209358E-08,
1.185643E-08,1.162393E-08,1.139599E-08,1.117252E-08,1.095344E-08,1.073865E-08,
1.052807E-08,1.032162E-08,1.011922E-08,9.920788E-09,9.726248E-09,9.535522E-09,
9.348536E-09,9.165217E-09,8.985493E-09,8.809293E-09,8.636548E-09,8.467190E-09,
8.301154E-09,8.138373E-09,7.978785E-09,7.822326E-09,7.668935E-09,7.518552E-09,
7.371117E-09,7.226574E-09,7.084866E-09,6.945936E-09,6.809730E-09,6.676195E-09,
6.545279E-09,6.416930E-09,6.291098E-09,6.167734E-09,6.046788E-09,5.928214E-09,
5.811966E-09,5.697997E-09,5.586262E-09,5.476719E-09,5.369324E-09,5.264035E-09,
5.160810E-09,5.059610E-09,4.960394E-09,4.863124E-09,4.767761E-09,4.674268E-09,
4.582609E-09,4.492746E-09,4.404646E-09,4.318274E-09,4.233595E-09,4.150577E-09,
4.069187E-09,3.989392E-09,3.911163E-09,3.834467E-09,3.759276E-09,3.685559E-09,
3.613287E-09,3.542433E-09,3.472968E-09,3.404865E-09,3.338098E-09,3.272640E-09,
3.208465E-09,3.145549E-09,3.083867E-09,3.023394E-09,2.964107E-09,2.905983E-09,
2.848998E-09,2.793131E-09,2.738360E-09,2.684662E-09,2.632017E-09,2.580405E-09,
2.529805E-09,2.480197E-09,2.431562E-09,2.383880E-09,2.337134E-09,2.291304E-09,
2.246373E-09,2.202323E-09,2.159137E-09,2.116798E-09,2.075288E-09,2.034593E-09,
1.994696E-09,1.955581E-09,1.917234E-09,1.879638E-09,1.842779E-09,1.806644E-09,
1.771216E-09,1.736484E-09,1.702433E-09,1.669049E-09,1.636320E-09,1.604233E-09,
1.572775E-09,1.541933E-09,1.511697E-09,1.482054E-09,1.452991E-09,1.424499E-09,
1.396566E-09,1.369180E-09,1.342331E-09,1.316009E-09,1.290203E-09,1.264903E-09,
1.240099E-09,1.215781E-09,1.191940E-09,1.168567E-09,1.145652E-09,1.123187E-09,
1.101162E-09,1.079568E-09,1.058399E-09,1.037644E-09,1.017297E-09,9.973481E-10,
9.777907E-10,9.586168E-10,9.398189E-10,9.213897E-10,9.033218E-10,8.856082E-10,
8.682420E-10,8.512163E-10,8.345244E-10,8.181599E-10,8.021163E-10,7.863873E-10,
7.709667E-10,7.558485E-10,7.410268E-10,7.264957E-10,7.122496E-10,6.982828E-10,
6.845899E-10,6.711655E-10,6.580044E-10,6.451013E-10,6.324513E-10,6.200493E-10,
6.078905E-10,5.959701E-10,5.842835E-10,5.728261E-10,5.615933E-10,5.505808E-10,
5.397842E-10,5.291994E-10,5.188221E-10,5.086483E-10,4.986741E-10,4.888954E-10,
4.793084E-10,4.699095E-10,4.606948E-10,4.516609E-10,4.428041E-10,4.341210E-10,
4.256081E-10,4.172622E-10,4.090800E-10,4.010581E-10,3.931936E-10,3.854834E-10,
3.779243E-10,3.705134E-10,3.632479E-10,3.561248E-10,3.491414E-10,3.422949E-10,
3.355828E-10,3.290022E-10,3.225506E-10,3.162256E-10,3.100246E-10,3.039452E-10,
2.979851E-10,2.921418E-10,2.864130E-10,2.807966E-10,2.752904E-10,2.698921E-10,
2.645997E-10,2.594111E-10,2.543242E-10,2.493370E-10,2.444477E-10,2.396542E-10,
2.349547E-10,2.303474E-10,2.258304E-10,2.214020E-10,2.170605E-10,2.128041E-10,
2.086311E-10,2.045400E-10,2.005291E-10,1.965968E-10,1.927417E-10,1.889621E-10,
1.852567E-10,1.816239E-10,1.780624E-10,1.745707E-10,1.711475E-10,1.677914E-10,
1.645011E-10,1.612753E-10,1.581128E-10,1.550123E-10,1.519726E-10,1.489925E-10,
1.460709E-10,1.432065E-10,1.403983E-10,1.376452E-10,1.349461E-10,1.322999E-10,
1.297055E-10,1.271621E-10,1.246685E-10,1.222238E-10,1.198271E-10,1.174774E-10,
1.151737E-10,1.129152E-10,1.107010E-10,1.085302E-10,1.064020E-10,1.043156E-10,
1.022700E-10,1.002645E-10,9.829841E-11,9.637084E-11,9.448107E-11,9.262835E-11,
9.081196E-11,8.903120E-11,8.728535E-11,8.557374E-11,8.389569E-11,8.225054E-11]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.hectare_to_acre = 2.47105
ted_empty.gms_to_mg = 1000.
ted_empty.lbs_to_gms = 453.592
ted_empty.crop_hgt = 1. # m
ted_empty.hectare_area = 10000. # m2
ted_empty.m3_to_liters = 1000.
ted_empty.mass_plant = 25000. # kg/hectare
ted_empty.density_plant = 0.77 # kg/L
# internally calculated variable (hlc in atm-m3/mol are 2.0e-7, 1.0e-5, 3.5e-6)
ted_empty.log_unitless_hlc = pd.Series([-5.087265, -3.388295, -3.844227], dtype='float')
# input variables that change per simulation
ted_empty.log_kow = pd.Series([2.75, 4., 6.], dtype='float')
ted_empty.foliar_diss_hlife = pd.Series([15., 25., 35.])
ted_empty.app_rate_min = pd.Series([0.18, 0.5, 1.25]) # lbs a.i./acre
# application scenarios generated from 'daily_app_flag' tests and reused here
daily_flag = pd.Series([[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]], dtype='bool')
for i in range(3):
result[i] = ted_empty.daily_canopy_air_timeseries(i, ted_empty.app_rate_min[i], daily_flag[i])
                # tolerance set to 1e-3 instead of 1e-4 because the precision of the constants specified in this code and in the OPP TED spreadsheet differs slightly
npt.assert_allclose(result[i],expected_results[i],rtol=1e-3, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
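    # --- Illustrative sketch (not part of the model under test; assumptions noted below) ---
    # The expected canopy-air series above follows first-order decay governed by the foliar dissipation
    # half-life, with a fixed per-application increment added on each application day. The per-application
    # concentration itself depends on application rate, canopy volume, Kow and the Henry's law constant
    # (per the TED documentation) and is not reproduced here; conc_per_app is treated as a given input.
    @staticmethod
    def _sketch_canopy_air_decay(conc_per_app, daily_flag, foliar_diss_hlife, num_days):
        import math
        decay = math.exp(-math.log(2.) / foliar_diss_hlife)  # fraction remaining after one day
        conc = []
        for day in range(num_days):
            carryover = conc[-1] * decay if conc else 0.
            conc.append(carryover + (conc_per_app if daily_flag[day] else 0.))
        return conc
    # e.g. foliar_diss_hlife=15., conc_per_app=2.697542E-06 and the first application scenario above
    # give ~2.6975E-06, 2.5757E-06, 2.4594E-06, 5.0459E-06 for days 0-3, matching the expected values.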
def test_set_max_drift_distance(self):
"""
        :description sets the maximum distance from the application source area for which spray drift calculations are performed
        :param app_method; application method (aerial/ground/airblast)
        :param max_spray_drift_dist: maximum distance from the application source area for which spray drift calculations are performed (feet)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([2600., 1000., 1000.], dtype='float')
result = pd.Series([], dtype='float')
try:
ted_empty.num_simulations = 3
# input variable that change per simulation
ted_empty.app_method_min = pd.Series(['aerial', 'ground', 'airblast'], dtype='object')
for i in range(ted_empty.num_simulations):
result[i] = ted_empty.set_max_drift_distance(ted_empty.app_method_min[i])
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
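    # --- Illustrative sketch (not part of the model under test) ---
    # A minimal mapping consistent with the expected results above: aerial applications are evaluated
    # out to 2600 ft, ground and airblast out to 1000 ft; the real method may include extra validation.
    @staticmethod
    def _sketch_max_drift_distance(app_method):
        return 2600. if app_method == 'aerial' else 1000.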
def test_set_max_respire_frac(self):
"""
        :description sets the maximum volumetric fraction of the spray droplet spectrum that is potentially respired by birds,
                     based on the application method and droplet size spectrum
:param app_method; application method (aerial/ground/airblast)
:param drop_size; droplet spectrum for application (see list below for aerial/ground - 'NA' if airblast)
        :param max_respire_frac; volumetric fraction of droplet spectrum not exceeding the upper size limit of respired particles for birds
        :NOTE this represents the specification from OPP TED Excel 'inputs' worksheet columns H & I rows 14 - 16;
              these values are used in the 'min/max rate doses' worksheet column S; although referenced there as the MAX of the
              three values specified in the 'inputs' worksheet (one per application method), the MAX will always be the value
              associated with the application method specified for the simulation (i.e., the value specified below)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([0.28, 0.067, 0.028, 0.02, 0.28, 0.067, 0.28], dtype='float')
result = pd.Series([], dtype='float')
try:
ted_empty.num_simulations = 7
# input variable that change per simulation
ted_empty.app_method_min = pd.Series(['aerial', 'aerial','aerial','aerial', 'ground', 'ground', 'airblast'], dtype='object')
ted_empty.droplet_spec_min = pd.Series(['very_fine_to_fine', 'fine_to_medium','medium_to_coarse','coarse_to_very_coarse',
'very_fine_to_fine', 'fine_to_medium-coarse', ' '], dtype='object')
for i in range(ted_empty.num_simulations):
result[i] = ted_empty.set_max_respire_frac(ted_empty.app_method_min[i], ted_empty.droplet_spec_min[i])
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
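    # --- Illustrative sketch (not part of the model under test; defaults are assumptions) ---
    # A lookup consistent with the inputs and expected results above; the droplet spectrum matters for
    # aerial and ground applications, while airblast uses 0.28. Fallback values are assumptions only.
    @staticmethod
    def _sketch_max_respire_frac(app_method, droplet_spec):
        aerial = {'very_fine_to_fine': 0.28, 'fine_to_medium': 0.067,
                  'medium_to_coarse': 0.028, 'coarse_to_very_coarse': 0.02}
        ground = {'very_fine_to_fine': 0.28, 'fine_to_medium-coarse': 0.067}
        if app_method == 'aerial':
            return aerial.get(droplet_spec, 0.28)
        if app_method == 'ground':
            return ground.get(droplet_spec, 0.28)
        return 0.28  # airblast (droplet spectrum not applicable)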
def test_calc_plant_risk_distance(self):
"""
        :description calculates the distance from the source area at which plant toxicity thresholds occur
        :NOTE represents columns C & D rows 32 to 51 in OPP TED Excel spreadsheet 'Plants' worksheet
              (only calculated if a health risk value is present;
              if the ratio of the health risk value to the application rate is greater than 1.0 then the distance is set to 0.0 (i.e., at the source area edge);
              if the distance is greater than the max spray drift distance then the distance is set to the max spray drift distance;
              values for risk distances are not stored across simulations)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series(['nan', 0.0, 0.229889], dtype='float')
result = pd.Series([], dtype='float')
try:
ted_empty.num_simulations = 3
# input variable that change per simulation
health_to_app_ratio = pd.Series(['nan', 2.0, 0.5], dtype='float')
param_a = pd.Series([0.0292, 0.1913, 5.5513], dtype='float')
param_b = pd.Series([0.822, 1.2366, 0.8523], dtype='float')
param_c = pd.Series([0.6539, 1.0552, 1.0079], dtype='float')
ted_empty.max_distance_from_source =
|
pd.Series([1000., 2600., 1000.], dtype='float')
|
pandas.Series
|
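# --- Illustrative sketch relating to the test_calc_plant_risk_distance record above (not part of the
# model under test). Only the thresholding/capping logic described in that docstring is shown; inverting
# the spray-drift curve (param_a/param_b/param_c) into a distance is delegated to a hypothetical helper,
# since its exact functional form is not reproduced here.
import math
def _sketch_plant_risk_distance(health_to_app_ratio, drift_distance_from_ratio, max_drift_distance):
    if math.isnan(health_to_app_ratio):
        return float('nan')  # no health risk value available for this simulation
    if health_to_app_ratio > 1.0:
        return 0.0  # threshold only exceeded at the source area edge
    distance = drift_distance_from_ratio(health_to_app_ratio)  # hypothetical drift-curve inversion
    return min(distance, max_drift_distance)  # cap at the maximum spray drift distance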
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_timedelta64_dtype
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import CategoricalIndex, Series, Timedelta, Timestamp
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
IntervalArray,
PandasArray,
PeriodArray,
SparseArray,
TimedeltaArray,
)
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, index_or_series, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_iterable_object_and_category(
self, index_or_series, method, dtype, rdtype, obj
):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_map(self, index_or_series, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
|
Timestamp("2011-01-01", tz="US/Eastern")
|
pandas.Timestamp
|
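For reference, a small standalone example of the tz-aware pandas.Timestamp behaviour this record completes (values are illustrative):

import pandas as pd

ts = pd.Timestamp("2011-01-01", tz="US/Eastern")
s = pd.Series([ts, pd.Timestamp("2011-01-02", tz="US/Eastern")])
# the Series dtype carries the timezone and iteration yields tz-aware Timestamps
assert str(s.dtype) == "datetime64[ns, US/Eastern]"
assert all(x.tz is not None for x in s)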
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from long_duration_mdk import ( # calc_change_in_reserve,
calc_benefit_reserve,
calc_continuance,
calc_discount,
calc_interpolation,
calc_pv,
calc_pvfnb,
)
def test_calc_continuance():
mortality_rate = pd.Series([0.01, 0.015, 0.02])
lapse_rate = pd.Series([0.2, 0.1, 0.05])
lives_ed = calc_continuance(mortality_rate, lapse_rate)
assert_series_equal(lives_ed, ((1 - mortality_rate) * (1 - lapse_rate)).cumprod())
lives_bd = lives_ed.shift(1, fill_value=1)
lives_md = calc_continuance(mortality_rate / 2, starting_duration=lives_bd)
assert_series_equal(lives_md, lives_bd * (1 - mortality_rate / 2))
def test_calc_discount():
interest_rate = pd.Series([0.03, 0.04, 0.05])
v_ed = calc_discount(interest_rate)
assert_series_equal(v_ed, pd.Series([0.970874, 0.933532, 0.889079]))
v_md = calc_discount(interest_rate, t_adj=0.5)
assert_series_equal(v_md, pd.Series([0.985329, 0.952020, 0.911034]))
v_bd = calc_discount(interest_rate, t_adj=0)
assert_series_equal(v_bd, pd.Series([1, 0.970874, 0.933532]))
def test_calc_interpolation():
# test nonzero values
val_0 = pd.Series([1, 2, 3])
val_1 = pd.Series([2, 3, 4])
wt_0 = pd.Series([0.5, 0.5, 0.5])
linear = calc_interpolation(val_0, val_1, wt_0, method="linear")
assert_series_equal(linear, pd.Series([1.5, 2.5, 3.5]))
log = calc_interpolation(val_0, val_1, wt_0, method="log-linear")
assert_series_equal(log, pd.Series([1.414214, 2.449490, 3.464102]))
# test one zero value
val_0 = pd.Series([0, 1, 2])
val_1 = pd.Series([1, 2, 3])
wt_0 = pd.Series([0.5, 0.5, 0.5])
linear = calc_interpolation(val_0, val_1, wt_0, method="linear")
assert_series_equal(linear, pd.Series([0.5, 1.5, 2.5]))
log = calc_interpolation(val_0, val_1, wt_0, method="log-linear")
assert_series_equal(log, pd.Series([0.414214, 1.449490, 2.464102]))
# test two zero values
val_0 = pd.Series([0, 0, 1])
val_1 = pd.Series([0, 1, 2])
wt_0 = pd.Series([0.5, 0.5, 0.5])
linear = calc_interpolation(val_0, val_1, wt_0, method="linear")
assert_series_equal(linear, pd.Series([0, 0.5, 1.5]))
log = calc_interpolation(val_0, val_1, wt_0, method="log-linear")
assert_series_equal(log, pd.Series([0, 0.414214, 1.449490]))
# test value less than zero
val_0 =
|
pd.Series([-1, 0, 1])
|
pandas.Series
|
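A minimal sketch of the linear weighting implied by the calc_interpolation tests above; it reproduces the expected values but is not the long_duration_mdk implementation itself:

import pandas as pd
from pandas.testing import assert_series_equal

def linear_interp(val_0, val_1, wt_0):
    # weight the starting values by wt_0 and the ending values by (1 - wt_0)
    return val_0 * wt_0 + val_1 * (1 - wt_0)

assert_series_equal(
    linear_interp(pd.Series([1, 2, 3]), pd.Series([2, 3, 4]), pd.Series([0.5, 0.5, 0.5])),
    pd.Series([1.5, 2.5, 3.5]),
)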
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
            from scipy.stats import skew, kurtosis  # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludeds_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E':
|
Categorical([np.nan], categories=['a'])
|
pandas.Categorical
|
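For reference, a small standalone example of the pandas.Categorical call this record completes; NaN is treated as a missing value rather than as a category:

import numpy as np
import pandas as pd

cat = pd.Categorical([np.nan], categories=["a"])
assert list(cat.categories) == ["a"]  # NaN never becomes a category
assert cat.isna().all()               # the single element is missing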
# -*- coding: utf-8 -*-
"""toxic.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1hd60tjRhTRN0wo5TbhlP9fzp8xnoEU43
"""
from google.colab import drive
drive.mount('/content/drive')
import pandas as pd, numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
train =
|
pd.read_csv('/content/drive/My Drive/datasets/hatekeyword.csv')
|
pandas.read_csv
|
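For reference, a minimal pandas.read_csv usage in the shape this record completes; the file path below is a hypothetical placeholder, not the notebook's Drive path:

import pandas as pd

train = pd.read_csv("data/hatekeyword.csv")  # hypothetical local CSV with a header row
print(train.shape)
print(train.columns.tolist()[:5])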
"""
@author: ravi
"""
import yfinance
from pandas import DataFrame, read_csv
from stockkit.general import config, Methods
from os import path, mkdir, makedirs
class DataSource(object):
def __init__(self, name):
self.name = name
def get_historic_prices(self, ticker, period):
pass
def _write_to_file(self):
pass
def _read_from_file(self):
pass
class GoogleFinance(DataSource):
def __init__(self):
super().__init__("google")
def get_historic_prices(self, ticker, period):
pass
class YahooFinance(DataSource):
def __init__(self):
super().__init__("yahoo")
self.downloaded_data =
|
DataFrame()
|
pandas.DataFrame
|
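For reference, a short example of the empty pandas.DataFrame initialisation this record completes; the rows used to fill it afterwards are illustrative, not actual downloaded prices:

import pandas as pd

downloaded_data = pd.DataFrame()  # starts empty, so .empty is True
assert downloaded_data.empty
# later the container can be replaced with real rows, e.g. downloaded prices
downloaded_data = pd.DataFrame(
    {"Close": [101.2, 102.5]},
    index=pd.to_datetime(["2020-01-02", "2020-01-03"]),
)
assert not downloaded_data.empty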
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# E N E R G Y S Y S T E M
# ==============================================================================
"""
* File name: energySystem.py
* Last edited: 2020-06-14
* Created by: <NAME> (TU Berlin)
The EnergySystem class is aristopy's main model container. An instance of
the EnergySystem class holds the modeled components, the overall pyomo
model and the results of the optimization.
The EnergySystem class provides features to built and solve the
optimization problem, manipulate the associated component models, and
process the results of the optimization. The implemented class methods are:
* :meth:`cluster <aristopy.energySystem.EnergySystem.cluster>`:
Perform clustering of the implemented time series data
* :meth:`declare_model <aristopy.energySystem.EnergySystem.declare_model>`:
Declare the pyomo optimization model
* :meth:`optimize <aristopy.energySystem.EnergySystem.optimize>`:
Call the main optimization routine
* :meth:`relax_integrality
<aristopy.energySystem.EnergySystem.relax_integrality>`:
Relax the integrality of binary variables
* :meth:`edit_component_variables
<aristopy.energySystem.EnergySystem.edit_component_variables>`:
Edit properties of component variables, e.g., change bounds or domains
* :meth:`reset_component_variables
<aristopy.energySystem.EnergySystem.reset_component_variables>`:
Reset component variables after applying changes, e.g., relaxation
* :meth:`export_component_configuration
<aristopy.energySystem.EnergySystem.export_component_configuration>`,
:meth:`import_component_configuration
<aristopy.energySystem.EnergySystem.import_component_configuration>`,:
Export and import configurations, i.e. component existences and capacities
* :meth:`add_design_integer_cut_constraint
<aristopy.energySystem.EnergySystem.add_design_integer_cut_constraint>`:
Create integer-cut-constraints to exclude the current design solution
from the solution space and enforce a new design in subsequent model runs
* :meth:`add_variable <aristopy.energySystem.EnergySystem.add_variable>`,
:meth:`add_constraint <aristopy.energySystem.EnergySystem.add_constraint>`,
:meth:`add_objective_function_contribution
<aristopy.energySystem.EnergySystem.add_objective_function_contribution>`:
Add variables, constraints and objective function contributions directly
to the main pyomo model, outside of the component declaration
"""
import os
import time
import json
from collections import OrderedDict
import pandas as pd
import pyomo.environ as pyo
import pyomo.network as network
import pyomo.opt as opt
from tsam.timeseriesaggregation import TimeSeriesAggregation
from aristopy import utils, logger
class EnergySystem:
def __init__(self, number_of_time_steps=8760, hours_per_time_step=1,
interest_rate=0.05, economic_lifetime=20, logging=None):
"""
Initialize an instance of the EnergySystem class.
:param number_of_time_steps: Number of considered time steps for
modeling the dispatch problem. With "hours_per_time_step" the
share of the modeled year can be calculated. In this way, the cost
of each time step is scaled and included in the objective function.
|br| *Default: 8760*
:type number_of_time_steps: int (>0)
:param hours_per_time_step: Number of hours per modeled time step.
|br| *Default: 1*
:type hours_per_time_step: int (>0)
:param interest_rate: Value to calculate the present value factor of a
cost rate that occurs in the future.
|br| *Default: 0.05 (corresponds to 5%)*
:type interest_rate: float, int (>=0)
:param economic_lifetime: Years to consider for calculating the net
present value of an investment with annual incoming and outgoing
cash flows.
|br| *Default: 20*
:type economic_lifetime: int (>0)
:param logging: Specify the behavior of the logging by setting an own
Logger class instance. User can decide where to log (file/console)
and what to log (see description of aristopy "Logger").
|br| *Default: None (display minimal logging in the console)*
:type logging: None or instance of aristopy's "Logger" class
"""
# Check user input:
utils.check_energy_system_input(
number_of_time_steps, hours_per_time_step, interest_rate,
economic_lifetime, logging)
# **********************************************************************
# Logging
# **********************************************************************
# If no logger instance is passed to the "logging" keyword a default
# logger is initialized. This will only display errors on the console.
# Otherwise the passed logger instance is used and a logger for the
# instance of the energy system class is initialized on "self.log"
if logging is None:
self.logger = logger.Logger(default_log_handler='stream',
default_log_level='ERROR')
else:
self.logger = logging
self.log = self.logger.get_logger(self)
# **********************************************************************
# Time and clustering
# **********************************************************************
self.number_of_time_steps = number_of_time_steps
self.hours_per_time_step = hours_per_time_step
self.number_of_years = number_of_time_steps * hours_per_time_step/8760.0
# Initialization: Overwritten if 'cluster' function is called
self.periods = [0]
self.periods_order = [0]
self.period_occurrences = [1]
self.number_of_time_steps_per_period = number_of_time_steps
self.inter_period_time_steps = [0, 1] # one before & after only period
# Flag 'is_data_clustered' indicates if the function 'cluster' has been
# called before. The flag is reset to False if new components are added.
self.is_data_clustered = False
# 'typical_periods' is altered by function 'cluster' to an array ranging
# from 0 to number_of_typical_periods-1.
self.typical_periods = [0]
# Stores the instance of the time series aggregation (if performed)
self.tsa_instance = None
# **********************************************************************
# Economics
# **********************************************************************
# The economic criterion net present value represents the objective
# function value to be maximized. Hence, a present value factor (pvf) is
# required to calculate the present value of an annuity. The global
# parameters interest rate and economic lifetime of the energy system
# investment are used to this end.
self.pvf = sum(1 / (1 + interest_rate)**n
for n in range(1, economic_lifetime+1))
# **********************************************************************
# Optimization
# **********************************************************************
# The parameter 'model' holds the pyomo ConcreteModel instance with
# sets, parameters, variables, constraints and the objective function.
# It is None during initialization and changed when the functions
# 'optimize', or 'declare_model' are called.
# Before the model instance is optimized, a solver instance is assigned
# to the "solver" attribute. It also stores basic results of the run.
# The "is_model_declared" flag indicates if the model instance is
# already declared.
# The "is_persistent_model_declared" flag states if the model has been
# declared and assigned to a persistent solver instance.
self.model = None
self.run_info = {'solver_name': '',
'time_limit': None, 'optimization_specs': '',
'model_build_time': 0, 'model_solve_time': 0,
'upper_bound': 0, 'lower_bound': 0, 'sense': '',
'solver_status': '', 'termination_condition': ''}
self.solver = None
self.is_model_declared = False
self.is_persistent_model_declared = False
# **********************************************************************
# Components
# **********************************************************************
# 'components' is a dictionary {component name: component object itself}
# in which all components of the EnergySystem instance are stored.
# The pyomo block model object (stored variables and constraints) of a
# component instance can be accessed via its "block" attribute.
self.components = {}
# The 'component_connections' is a dict that stores the connections of
# the component instances of the energy system model. It is formed from
# the specified inlets and outlets and the connecting commodity:
# {arc_name: [source instance, destination instance, commodity_name]}
self.component_connections = {}
# 'component_configuration' is a pandas Dataframe to store basic
# information about the availability and capacity of the modelled
# components. It is used to export / import the configuration results.
self.component_configuration = pd.DataFrame(
index=[utils.BI_EX, utils.BI_MODULE_EX, utils.CAP])
# DataFrames and dictionaries to store additionally added pyomo objects
# (variables and constraints) and objective function contributions.
self.added_constraints = pd.DataFrame(index=['has_time_set',
'alternative_set', 'rule'])
self.added_variables = pd.DataFrame(index=['domain', 'has_time_set',
'alternative_set', 'init',
'ub', 'lb', 'pyomo'])
self.added_objective_function_contributions = {}
self.added_obj_contributions_results = {}
self.log.info('Initializing EnergySystem completed!')
def __repr__(self):
# Define class format for printing and logging
return '<EnSys: "id=%s..%s">' % (hex(id(self))[:3], hex(id(self))[-3:])
def add_variable(self, var):
"""
Function to manually add pyomo variables to the main pyomo model
(ConcreteModel: model) of the energy system instance via instances
of aristopy's Var class. The attributes of the variables are stored in
DataFrame "added_variables" and later initialized during the call of
function 'optimize', or 'declare_model'.
:param var: Instances of aristopy's Var class (single or in list)
"""
self.log.info('Call of function "add_variable"')
# Check the correctness of the user input
var_list = utils.check_add_vars_input(var)
for v in var_list:
# Wrap everything up in a pandas Series
series = pd.Series({'has_time_set': v.has_time_set,
'alternative_set': v.alternative_set,
'domain': v.domain, 'init': v.init,
'ub': v.ub, 'lb': v.lb, 'pyomo': None})
# Add the Series with new name to DataFrame "added_variables"
self.added_variables[v.name] = series
def add_constraint(self, rule, name=None, has_time_set=True,
alternative_set=None):
"""
Function to manually add constraints to the main pyomo model after the
instance has been created. The attributes are stored in the DataFrame
'added_constraints' and later initialized during the call of function
'optimize', or 'declare_model'.
:param rule: A Python function that specifies the constraint with a
equality or inequality expression. The rule must hold at least
two arguments: First the energy system instance it is added to (in
most cases: self), second the ConcreteModel of the instance (model).
Additional arguments represent sets (e.g., time).
:type rule: function
:param name: Name (identifier) of the added constraint. The rule name is
used if no name is specified.
|br| *Default: None*
:type name: str
:param has_time_set: Is True if the time set of the energy system model
is also a set of the added constraint.
|br| *Default: True*
:type has_time_set: bool
:param alternative_set: Alternative constraint sets can be added here
via iterable Python objects (e.g. list).
|br| *Default: None*
"""
self.log.info('Call of function "add_constraint"')
# Check the correctness of the user input
utils.check_add_constraint(rule, name, has_time_set, alternative_set)
# The rule name is used as constraint identifier if no name is given
if name is None:
name = rule.__name__
# Put everything together in a pandas Series
series = pd.Series({'has_time_set': has_time_set,
'alternative_set': alternative_set,
'rule': rule})
# Add the Series to the DataFrame "added_constraints"
self.added_constraints[name] = series
def add_objective_function_contribution(self, rule, name=None):
"""
Additional objective function contributions can be added with this
method. The method requires a Python function input that takes the main
pyomo model (ConcreteModel: model) and returns a single (scalar) value.
:param rule: A Python function returning a scalar value which is added
to the objective function of the model instance. The rule must hold
exactly two arguments: The energy system instance it is added to (in
most cases: self), second the ConcreteModel of the instance (model).
:type rule: function
:param name: Name (identifier) of the added objective function
contribution. The rule name is used if no name is specified.
|br| *Default: None*
:type name: str
"""
self.log.info('Call of function "add_objective_function_contribution"')
# Check the input:
assert isinstance(name, (str, type(None))), '"name" should be a string!'
if not callable(rule):
raise TypeError('The "rule" needs to hold a callable object!')
if name is None:
name = rule.__name__
# Add the rule and the name to a dictionary of the EnergySystem instance
self.added_objective_function_contributions[name] = rule
def cluster(self, number_of_typical_periods=4,
number_of_time_steps_per_period=24,
cluster_method='hierarchical',
**kwargs):
"""
Method for the aggregation and clustering of time series data. First,
the time series data and their respective weights are collected from all
components and split into pieces with equal length of
'number_of_time_steps_per_period'.
Subsequently, a clustering method is called and each period is assigned
to one of 'number_of_typical_periods' typical periods. The clustered
data is later stored in the components.
The package `tsam <https://github.com/FZJ-IEK3-VSA/tsam>`_ (time series
aggregation module) is used to perform the clustering.
The clustering algorithm can be controlled by adding required keyword
arguments (using 'kwargs' parameter). To learn more about tsam and
possible keyword arguments see the package `documentation
<https://tsam.readthedocs.io/en/latest/index.html>`_.
:param number_of_typical_periods: Number of typical periods to be
clustered. |br| *Default: 4*
:type number_of_typical_periods: int (>0)
:param number_of_time_steps_per_period: Number of time steps per period
|br| *Default: 24*
:type number_of_time_steps_per_period: int (>0)
:param cluster_method: Name of the applied clustering method (e.g.,
'k_means'). See the tsam documentation for all possible options.
|br| *Default: 'hierarchical'*
:type cluster_method: str
"""
# Check input arguments
utils.check_cluster_input(number_of_typical_periods,
number_of_time_steps_per_period,
self.number_of_time_steps)
time_start = time.time()
self.log.info('Start clustering with %s typical periods and %s time '
'steps per period.' % (number_of_typical_periods,
number_of_time_steps_per_period))
# Get time series data and their respective weights from all components
# and collect them in two dictionaries
time_series_data, time_series_weights = {}, {}
for comp in self.components.values():
if comp.number_in_group == 1: # Add only once per group
data, weights = comp.get_time_series_data_for_aggregation()
time_series_data.update(data)
time_series_weights.update(weights)
# Convert data dictionary to pandas DataFrame
time_series_data = pd.DataFrame.from_dict(time_series_data)
# Specific index is not relevant, but tsam requires a uniform index
time_series_data.index = \
pd.date_range('2050-01-01 00:30:00',
periods=self.number_of_time_steps,
freq=(str(self.hours_per_time_step) + 'H'),
tz='Europe/Berlin')
# Reindex axis for reproducibility of TimeSeriesAggregation results
time_series_data = time_series_data.reindex(
sorted(time_series_data.columns), axis=1)
# Set up instance of tsam's TimeSeriesAggregation class and cluster data
self.tsa_instance = TimeSeriesAggregation(
timeSeries=time_series_data,
noTypicalPeriods=number_of_typical_periods,
hoursPerPeriod=
number_of_time_steps_per_period * self.hours_per_time_step,
clusterMethod=cluster_method,
weightDict=time_series_weights,
**kwargs)
# Store clustered time series data in the components
data =
|
pd.DataFrame.from_dict(self.tsa_instance.clusterPeriodDict)
|
pandas.DataFrame.from_dict
|
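For reference, a minimal pandas.DataFrame.from_dict example in the shape this record completes; the nested dictionary is illustrative, not actual tsam clustering output:

import pandas as pd

cluster_period_dict = {"load": {0: 1.0, 1: 0.8}, "price": {0: 30.0, 1: 25.0}}
data = pd.DataFrame.from_dict(cluster_period_dict)
# outer keys become columns, inner keys become the index
assert list(data.columns) == ["load", "price"]
assert list(data.index) == [0, 1]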
# -*- coding: utf-8 -*-
import pandas as pd
import sqlalchemy as sql
'''
@author:<NAME>
@version:0.0.1
'''
"""
Copyright © 2020 <copyright holders>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the “Software”), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Data output class: writes data in various formats to the file system, a message queue, or a database
class output(object):
    def to_csv(self, df, path, encoding):
        try:
            df.to_csv(path, encoding=encoding)
            print("successfully saved file to %s" % path)
        except Exception:
            print("failed to save file")
    def to_mysql(self, df, tablename, databasename, host, port, user, password):
        try:
            con = self.__get_con(host, user, password, port, databasename)
            df.to_sql(name=tablename, con=con)
            print('save to mysql succeeded')
        except Exception:
            print('save to mysql failed')
    def to_json(self, df):
        try:
            res = df.to_json()
            return res
        except Exception:
            print('to json error')
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
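For reference, a short usage of the output helper defined above with an in-memory frame; the CSV path is a hypothetical local file:

import pandas as pd

df = pd.DataFrame({"symbol": ["AAA", "BBB"], "price": [10.5, 20.1]})
writer = output()
print(writer.to_json(df))                 # JSON string of the frame
writer.to_csv(df, "prices.csv", "utf-8")  # hypothetical local path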
# -*- coding: utf-8 -*-
""" Project : PyCoA
Date : april 2020 - march 2021
Authors : <NAME>, <NAME>, <NAME>
Copyright ©pycoa.fr
License: See joint LICENSE file
Module : coa.geo
About :
-------
Geo classes within the PyCoA framework.
GeoManager class provides translations between naming normalisations
of countries. It's based on the pycountry module.
GeoInfo class allow to add new fields to a pandas DataFrame about
statistical information for countries.
GeoRegion class helps returning list of countries in a specified region
GeoCountry manages information for a single country.
"""
import inspect # for debug purpose
import warnings
import pycountry as pc
import pycountry_convert as pcc
import pandas as pd
import geopandas as gpd
import shapely.geometry as sg
import shapely.affinity as sa
import shapely.ops as so
import bs4
from coa.tools import verb,kwargs_test,get_local_from_url,dotdict,tostdstring
from coa.error import *
# ---------------------------------------------------------------------
# --- GeoManager class ------------------------------------------------
# ---------------------------------------------------------------------
class GeoManager():
"""GeoManager class definition. No inheritance from any other class.
It should raise only CoaError and derived exceptions in case
of errors (see pycoa.error)
"""
_list_standard=['iso2', # Iso2 standard, default
'iso3', # Iso3 standard
'name', # Standard name ( != Official, caution )
'num'] # Numeric standard
_list_db=[None,'jhu','worldometers','owid','opencovid19national','spfnational'] # first is default
_list_output=['list','dict','pandas'] # first is default
_standard = None # currently used normalisation standard
def __init__(self,standard=_list_standard[0]):
""" __init__ member function, with default definition of
the used standard. To get the current default standard,
see get_list_standard()[0].
"""
verb("Init of GeoManager() from "+str(inspect.stack()[1]))
self.set_standard(standard)
self._gr=GeoRegion()
def get_GeoRegion(self):
""" return the GeoRegion local instance
"""
return self._gr
def get_region_list(self):
""" return the list of region via the GeoRegion instance
"""
return self._gr.get_region_list()
def get_list_standard(self):
""" return the list of supported standard name of countries.
First one is default for the class
"""
return self._list_standard
def get_list_output(self):
""" return supported list of output type. First one is default
for the class
"""
return self._list_output
def get_list_db(self):
""" return supported list of database name for translation of
country names to standard.
"""
return self._list_db
def get_standard(self):
""" return current standard use within the GeoManager class
"""
return self._standard
def set_standard(self,standard):
"""
set the working standard type within the GeoManager class.
The standard should meet the get_list_standard() requirement
"""
if not isinstance(standard,str):
raise CoaTypeError('GeoManager error, the standard argument'
' must be a string')
if standard not in self.get_list_standard():
raise CoaKeyError('GeoManager.set_standard error, "'+\
standard+' not managed. Please see '\
'get_list_standard() function')
self._standard=standard
return self.get_standard()
def to_standard(self, w, **kwargs):
"""Given a list of string of locations (countries), returns a
        normalised list according to the used standard (defined
        via the set_standard() or __init__ function); current default is iso2.
Arguments
-----------------
first arg -- w, list of string of locations (or single string)
to convert to standard one
output -- 'list' (default), 'dict' or 'pandas'
db -- database name to help conversion.
Default : None, meaning best effort to convert.
                      Known databases : jhu, worldometers...
See get_list_db() for full list of known db for
standardization
interpret_region -- Boolean, default=False. If yes, the output should
be only 'list'.
"""
kwargs_test(kwargs,['output','db','interpret_region'],'Bad args used in the to_standard() function.')
output=kwargs.get('output',self.get_list_output()[0])
if output not in self.get_list_output():
raise CoaKeyError('Incorrect output type. See get_list_output()'
' or help.')
db=kwargs.get('db',self.get_list_db()[0])
if db not in self.get_list_db():
raise CoaDbError('Unknown database "'+db+'" for translation to '
'standardized location names. See get_list_db() or help.')
interpret_region=kwargs.get('interpret_region',False)
if not isinstance(interpret_region,bool):
raise CoaTypeError('The interpret_region argument is a boolean, '
'not a '+str(type(interpret_region)))
if interpret_region==True and output!='list':
raise CoaKeyError('The interpret_region True argument is incompatible '
'with non list output option.')
if isinstance(w,str):
w=[w]
elif not isinstance(w,list):
            raise CoaTypeError('Waiting for str, list of str or pandas '
                               'as input of to_standard function member of GeoManager')
w=[v.title() for v in w] # capitalize first letter of each name
w0=w.copy()
if db:
w=self.first_db_translation(w,db)
n=[] # will contain standardized name of countries (if possible)
#for c in w:
while len(w)>0:
c=w.pop(0)
if type(c)==int:
c=str(c)
elif type(c)!=str:
raise CoaTypeError('Locations should be given as '
'strings or integers only')
if (c in self._gr.get_region_list()) and interpret_region == True:
w=self._gr.get_countries_from_region(c)+w
else:
if len(c)==0:
n1='' #None
else:
try:
n0=pc.countries.lookup(c)
except LookupError:
try:
                            if c.startswith('Owid_'):
                                nf=['owid_*']
                                n0=nf[0]
                                n1='OWID_*'
else:
nf=pc.countries.search_fuzzy(c)
if len(nf)>1:
warnings.warn('Caution. More than one country match the key "'+\
c+'" : '+str([ (k.name+', ') for k in nf])+\
', using first one.\n')
n0=nf[0]
except LookupError:
                            raise CoaLookupError('No country matches the key "'+c+'". Error.')
                        except Exception as e1:
                            raise CoaNotManagedError('Not managed error '+str(type(e1)))
                    except Exception as e2:
                        raise CoaNotManagedError('Not managed error '+str(type(e2)))
if n0 != 'owid_*':
if self._standard=='iso2':
n1=n0.alpha_2
elif self._standard=='iso3':
n1=n0.alpha_3
elif self._standard=='name':
n1=n0.name
elif self._standard=='num':
n1=n0.numeric
else:
raise CoaKeyError('Current standard is '+self._standard+\
' which is not managed. Error.')
n.append(n1)
if output=='list':
return n
elif output=='dict':
return dict(zip(w0, n))
elif output=='pandas':
return pd.DataFrame({'inputname':w0,self._standard:n})
else:
return None # should not be here
def first_db_translation(self,w,db):
""" This function helps to translate from country name to
standard for specific databases. It's the first step
before final translation.
One can easily add some database support adding some new rules
for specific databases
"""
translation_dict={}
# Caution : keys need to be in title mode, i.e. first letter capitalized
if db=='jhu':
translation_dict.update({\
"Congo (Brazzaville)":"Republic of the Congo",\
"Congo (Kinshasa)":"COD",\
"Korea, South":"KOR",\
"Taiwan*":"Taiwan",\
"Laos":"LAO",\
"West Bank And Gaza":"PSE",\
"Burma":"Myanmar",\
"Iran":"IRN",\
"<NAME>":"",\
"Ms Zaandam":"",\
"Summer Olympics 2020":"",\
"Micronesia":"FSM",\
                }) # entries mapped to empty strings are cruise ships or events with no ISO code
elif db=='worldometers':
translation_dict.update({\
"Dr Congo":"COD",\
"Congo":"COG",\
"Iran":"IRN",\
"South Korea":"KOR",\
"North Korea":"PRK",\
"Czech Republic (Czechia)":"CZE",\
"Laos":"LAO",\
"Sao Tome & Principe":"STP",\
"Channel Islands":"JEY",\
"St. Vincent & Grenadines":"VCT",\
"U.S. Virgin Islands":"VIR",\
"Saint Kitts & Nevis":"KNA",\
"Faeroe Islands":"FRO",\
"Caribbean Netherlands":"BES",\
"Wallis & Futuna":"WLF",\
"Saint Pierre & Miquelon":"SPM",\
"Sint Maarten":"SXM",\
} )
elif db=='owid':
translation_dict.update({\
"Bonaire Sint Eustatius And Saba":"BES",\
"Cape Verde":"CPV",\
"Democratic Republic Of Congo":"COD",\
"Faeroe Islands":"FRO",\
"Laos":"LAO",\
"South Korea":"KOR",\
"Swaziland":"SWZ",\
"United States Virgin Islands":"VIR",\
"Iran":"IRN",\
"Micronesia (Country)":"FSM",\
"Northern Cyprus":"CYP",\
"Curacao":"CUW",\
"Faeroe Islands":"FRO",\
"Vatican":"VAT"
})
return [translation_dict.get(k,k) for k in w]
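# --- Illustrative usage sketch (added for clarity; not part of the original
# --- pycoa module). It relies only on the GeoManager API documented above;
# --- the expected outputs in the comments are assumptions, not verified runs.
def _example_geomanager_usage():
    """Sketch of typical GeoManager.to_standard() calls."""
    g = GeoManager(standard='iso3')
    # Translate database-specific spellings to ISO3 codes, e.g. ['KOR', 'LAO'].
    codes = g.to_standard(['South Korea', 'Laos'], db='worldometers')
    # Ask for a pandas output: a DataFrame with 'inputname' and 'iso3' columns.
    table = g.to_standard(['France', 'Italy'], output='pandas')
    return codes, table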
# ---------------------------------------------------------------------
# --- GeoInfo class ---------------------------------------------------
# ---------------------------------------------------------------------
class GeoInfo():
"""GeoInfo class definition. No inheritance from any other class.
It should raise only CoaError and derived exceptions in case
of errors (see pycoa.error)
"""
_list_field={\
'continent_code':'pycountry_convert (https://pypi.org/project/pycountry-convert/)',\
'continent_name':'pycountry_convert (https://pypi.org/project/pycountry-convert/)' ,\
'country_name':'pycountry_convert (https://pypi.org/project/pycountry-convert/)' ,\
'population':'https://www.worldometers.info/world-population/population-by-country/',\
'area':'https://www.worldometers.info/world-population/population-by-country/',\
'fertility':'https://www.worldometers.info/world-population/population-by-country/',\
'median_age':'https://www.worldometers.info/world-population/population-by-country/',\
'urban_rate':'https://www.worldometers.info/world-population/population-by-country/',\
#'geometry':'https://github.com/johan/world.geo.json/',\
'geometry':'http://thematicmapping.org/downloads/world_borders.php and https://github.com/johan/world.geo.json/',\
'region_code_list':'https://en.wikipedia.org/w/index.php?title=List_of_countries_by_United_Nations_geoscheme&oldid=1008989486',\
#https://en.wikipedia.org/wiki/List_of_countries_by_United_Nations_geoscheme',\
'region_name_list':'https://en.wikipedia.org/w/index.php?title=List_of_countries_by_United_Nations_geoscheme&oldid=1008989486',\
#https://en.wikipedia.org/wiki/List_of_countries_by_United_Nations_geoscheme',\
'capital':'https://en.wikipedia.org/w/index.php?title=List_of_countries_by_United_Nations_geoscheme&oldid=1008989486',\
#https://en.wikipedia.org/wiki/List_of_countries_by_United_Nations_geoscheme',\
'flag':'https://github.com/linssen/country-flag-icons/blob/master/countries.json',\
}
_data_geometry = pd.DataFrame()
_data_population =
|
pd.DataFrame()
|
pandas.DataFrame
|
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
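# Illustrative extra check (added for clarity; not part of the original pandas
# test module): to_offset() sums the pieces of a compound frequency string, so
# '1h30min' and a 90-minute Timedelta should resolve to the same offset.
def test_to_offset_compound_equivalence_sketch():
    result_str = frequencies.to_offset('1h30min')
    result_td = frequencies.to_offset(Timedelta(minutes=90))
    expected = offsets.Minute(90)
    assert(result_str == expected)
    assert(result_td == expected)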
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result =
|
frequencies._get_rule_month('Q')
|
pandas.tseries.frequencies._get_rule_month
|
import functools
import numpy as np
import scipy
import scipy.linalg
import scipy
import scipy.sparse as sps
import scipy.sparse.linalg as spsl
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import logging
import tables as tb
import os
import sandy
import pytest
pd.options.display.float_format = '{:.5e}'.format
__author__ = "<NAME>"
__all__ = [
"CategoryCov",
"EnergyCov",
"triu_matrix",
"corr2cov",
"random_corr",
"random_cov",
"sample_distribution",
]
S = np.array([[1, 1, 1],
[1, 2, 1],
[1, 3, 1]])
var = np.array([[0, 0, 0],
[0, 2, 0],
[0, 0, 3]])
minimal_covtest = pd.DataFrame(
[[9437, 2, 1e-2, 9437, 2, 1e-2, 0.02],
[9437, 2, 2e5, 9437, 2, 2e5, 0.09],
[9437, 2, 1e-2, 9437, 102, 1e-2, 0.04],
[9437, 2, 2e5, 9437, 102, 2e5, 0.05],
[9437, 102, 1e-2, 9437, 102, 1e-2, 0.01],
[9437, 102, 2e5, 9437, 102, 2e5, 0.01]],
columns=["MAT", "MT", "E", "MAT1", "MT1", 'E1', "VAL"]
)
def cov33csv(func):
def inner(*args, **kwargs):
key = "<KEY>"
kw = kwargs.copy()
if key in kw:
if kw[key]:
                print(f"found argument '{key}', ignore other arguments")
out = func(
*args,
index_col=[0, 1, 2],
header=[0, 1, 2],
)
out.index.names = ["MAT", "MT", "E"]
out.columns.names = ["MAT", "MT", "E"]
return out
else:
del kw[key]
out = func(*args, **kw)
return out
return inner
class _Cov(np.ndarray):
"""Covariance matrix treated as a `numpy.ndarray`.
Methods
-------
corr
extract correlation matrix
corr2cov
produce covariance matrix given correlation matrix and standard
deviation array
eig
get covariance matrix eigenvalues and eigenvectors
get_L
decompose and extract lower triangular matrix
sampling
draw random samples
"""
def __new__(cls, arr):
obj = np.ndarray.__new__(cls, arr.shape, float)
obj[:] = arr[:]
if not obj.ndim == 2:
raise sandy.Error("covariance matrix must have two dimensions")
if not np.allclose(obj, obj.T):
raise sandy.Error("covariance matrix must be symmetric")
if (np.diag(arr) < 0).any():
raise sandy.Error("covariance matrix must have positive variances")
return obj
@staticmethod
def _up2down(self):
U = np.triu(self)
L = np.triu(self, 1).T
C = U + L
return C
def eig(self):
"""
Extract eigenvalues and eigenvectors.
Returns
-------
`Pandas.Series`
real part of eigenvalues sorted in descending order
`np.array`
matrix of eigenvectors
"""
E, V = scipy.linalg.eig(self)
E, V = E.real, V.real
return E, V
def corr(self):
"""Extract correlation matrix.
.. note:: zeros on the covariance matrix diagonal are translated
            into zeros also on the correlation matrix diagonal.
Returns
-------
`sandy.formats.utils.Cov`
correlation matrix
"""
std = np.sqrt(np.diag(self))
with np.errstate(divide='ignore', invalid='ignore'):
coeff = np.true_divide(1, std)
coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN
corr = np.multiply(np.multiply(self.T, coeff).T, coeff)
return self.__class__(corr)
def _reduce_size(self):
"""
Reduces the size of the matrix, erasing the null values.
Returns
-------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
"""
nonzero_idxs = np.flatnonzero(np.diag(self))
cov_reduced = self[nonzero_idxs][:, nonzero_idxs]
return nonzero_idxs, cov_reduced
@classmethod
def _restore_size(cls, nonzero_idxs, cov_reduced, dim):
"""
Restore the size of the matrix
Parameters
----------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
dim : int
Dimension of the original matrix.
Returns
-------
cov : sandy.core.cov._Cov
Matrix of specified dimensions.
"""
cov = _Cov(np.zeros((dim, dim)))
for i, ni in enumerate(nonzero_idxs):
cov[ni, nonzero_idxs] = cov_reduced[i]
return cov
def sampling(self, nsmp, seed=None):
"""
Extract random samples from the covariance matrix, either using
the cholesky or the eigenvalue decomposition.
Parameters
----------
nsmp : `int`
number of samples
seed : `int`
seed for the random number generator (default is `None`)
Returns
-------
`np.array`
2D array of random samples with dimension `(self.shape[0], nsmp)`
"""
dim = self.shape[0]
np.random.seed(seed=seed)
y = np.random.randn(dim, nsmp)
nonzero_idxs, cov_reduced = self._reduce_size()
L_reduced = cov_reduced.get_L()
L = self.__class__._restore_size(nonzero_idxs, L_reduced, dim)
samples = np.array(L.dot(y))
return samples
def get_L(self):
"""
Extract lower triangular matrix `L` for which `L*L^T == self`.
Returns
-------
`np.array`
lower triangular matrix
"""
try:
L = scipy.linalg.cholesky(
self,
lower=True,
overwrite_a=False,
check_finite=False
)
except np.linalg.linalg.LinAlgError:
E, V = self.eig()
E[E <= 0] = 0
Esqrt = np.diag(np.sqrt(E))
M = V.dot(Esqrt)
Q, R = scipy.linalg.qr(M.T)
L = R.T
return L
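# --- Illustrative sketch (added for clarity; not part of the original sandy
# --- module). It exercises only the _Cov API defined above; the matrices are
# --- made-up examples chosen to hit both branches of get_L().
def _example_cov_decomposition():
    """Show get_L() on a positive-definite and on an indefinite matrix."""
    pos_def = _Cov(np.array([[1.0, 0.4], [0.4, 1.0]]))
    L1 = pos_def.get_L()                     # Cholesky succeeds here
    assert np.allclose(L1.dot(L1.T), pos_def)
    indefinite = _Cov(np.array([[1.0, 2.0], [2.0, 1.0]]))  # eigenvalues 3 and -1
    L2 = indefinite.get_L()                  # falls back to the eigenvalue route,
    return L1, L2                            # the negative eigenvalue is clipped to 0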
class CategoryCov():
"""
Properties
----------
data
covariance matrix as a dataframe
size
first dimension of the covariance matrix
Methods
-------
corr2cov
create a covariance matrix given a correlation matrix and a standard
deviation vector
from_stack
create a covariance matrix from a stacked `pd.DataFrame`
from_stdev
construct a covariance matrix from a stdev vector
from_var
construct a covariance matrix from a variance vector
get_corr
extract correlation matrix from covariance matrix
get_eig
extract eigenvalues and eigenvectors from covariance matrix
get_L
extract lower triangular matrix such that $C=L L^T$
get_std
extract standard deviations from covariance matrix
invert
calculate the inverse of the matrix
sampling
extract perturbation coefficients according to chosen distribution
and covariance matrix
"""
def __repr__(self):
return self.data.__repr__()
def __init__(self, *args, **kwargs):
self.data = pd.DataFrame(*args, **kwargs)
@property
def data(self):
"""
Covariance matrix as a dataframe.
Attributes
----------
index : `pandas.Index` or `pandas.MultiIndex`
indices
columns : `pandas.Index` or `pandas.MultiIndex`
columns
values : `numpy.array`
covariance values as `float`
Returns
-------
`pandas.DataFrame`
covariance matrix
Notes
-----
        .. note:: In the future, additional tests will be implemented to check
        that the covariance matrix is symmetric and has positive variances.
Examples
--------
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array[1])
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array([[1, 2], [2, -4]]))
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array([[1, 2], [3, 4]]))
"""
return self._data
@data.setter
def data(self, data):
self._data = pd.DataFrame(data, dtype=float)
        if not (len(data.shape) == 2 and data.shape[0] == data.shape[1]):
raise TypeError("Covariance matrix must have two dimensions")
if not (np.diag(data) >= 0).all():
raise TypeError("Covariance matrix must have positive variance")
sym_limit = 10
# Round to avoid numerical fluctuations
if not (data.values.round(sym_limit) == data.values.T.round(sym_limit)).all():
raise TypeError("Covariance matrix must be symmetric")
@property
def size(self):
return self.data.values.shape[0]
def get_std(self):
"""
Extract standard deviations.
Returns
-------
`pandas.Series`
1d array of standard deviations
Examples
--------
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).get_std()
0 1.00000e+00
1 1.00000e+00
Name: STD, dtype: float64
"""
cov = self.to_sparse().diagonal()
std = np.sqrt(cov)
return pd.Series(std, index=self.data.index, name="STD")
def get_eig(self, tolerance=None):
"""
Extract eigenvalues and eigenvectors.
Parameters
----------
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
The replacement condition is implemented as:
.. math::
$$
\frac{e_i}{e_{MAX}} < tolerance
$$
Then, a `tolerance=1e-3` will replace all eigenvalues
1000 times smaller than the largest eigenvalue.
A `tolerance=0` will replace all negative eigenvalues.
Returns
-------
`Pandas.Series`
array of eigenvalues
`pandas.DataFrame`
matrix of eigenvectors
Notes
-----
.. note:: only the real part of the eigenvalues is preserved
        .. note:: the discussion associated with the implementation
of this algorithm is available [here](https://github.com/luca-fiorito-11/sandy/discussions/135)
Examples
--------
Extract eigenvalues of correlation matrix.
>>> sandy.CategoryCov([[1, 0.4], [0.4, 1]]).get_eig()[0]
0 1.40000e+00
1 6.00000e-01
Name: EIG, dtype: float64
Extract eigenvectors of correlation matrix.
>>> sandy.CategoryCov([[1, 0.4], [0.4, 1]]).get_eig()[1]
0 1
0 7.07107e-01 -7.07107e-01
1 7.07107e-01 7.07107e-01
Extract eigenvalues of covariance matrix.
>>> sandy.CategoryCov([[0.1, 0.1], [0.1, 1]]).get_eig()[0]
0 8.90228e-02
1 1.01098e+00
Name: EIG, dtype: float64
Set up a tolerance.
>>> sandy.CategoryCov([[0.1, 0.1], [0.1, 1]]).get_eig(tolerance=0.1)[0]
0 0.00000e+00
1 1.01098e+00
Name: EIG, dtype: float64
Test with negative eigenvalues.
>>> sandy.CategoryCov([[1, 2], [2, 1]]).get_eig()[0]
0 3.00000e+00
1 -1.00000e+00
Name: EIG, dtype: float64
Replace negative eigenvalues.
>>> sandy.CategoryCov([[1, 2], [2, 1]]).get_eig(tolerance=0)[0]
0 3.00000e+00
1 0.00000e+00
Name: EIG, dtype: float64
Check output size.
>>> cov = sandy.CategoryCov.random_cov(50, seed=11)
>>> assert cov.get_eig()[0].size == cov.data.shape[0] == 50
>>> sandy.CategoryCov([[1, 0.2, 0.1], [0.2, 2, 0], [0.1, 0, 3]]).get_eig()[0]
0 9.56764e-01
1 2.03815e+00
2 3.00509e+00
Name: EIG, dtype: float64
Real test on H1 file
>>> endf6 = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> ek = sandy.energy_grids.CASMO12
>>> err = endf6.get_errorr(ek_errorr=ek, err=1)
>>> cov = err.get_cov()
>>> cov.get_eig()[0].sort_values(ascending=False).head(7)
0 3.66411e-01
1 7.05311e-03
2 1.55346e-03
3 1.60175e-04
4 1.81374e-05
5 1.81078e-06
6 1.26691e-07
Name: EIG, dtype: float64
>>> assert not (cov.get_eig()[0] >= 0).all()
>>> assert (cov.get_eig(tolerance=0)[0] >= 0).all()
"""
E, V = scipy.linalg.eig(self.data)
E = pd.Series(E.real, name="EIG")
V = pd.DataFrame(V.real)
if tolerance is not None:
E[E/E.max() < tolerance] = 0
return E, V
def get_corr(self):
"""
Extract correlation matrix.
Returns
-------
        df : :obj: `CategoryCov`
correlation matrix
Examples
--------
>>> sandy.CategoryCov([[4, 2.4],[2.4, 9]]).get_corr()
0 1
0 1.00000e+00 4.00000e-01
1 4.00000e-01 1.00000e+00
"""
cov = self.data.values
with np.errstate(divide='ignore', invalid='ignore'):
coeff = np.true_divide(1, self.get_std().values)
coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN
corr = np.multiply(np.multiply(cov, coeff).T, coeff)
df = pd.DataFrame(
corr,
index=self.data.index,
columns=self.data.columns,
)
return self.__class__(df)
def invert(self, rows=None):
"""
Method for calculating the inverse matrix.
Parameters
----------
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`CategoryCov`
The inverse matrix.
Examples
--------
>>> S = sandy.CategoryCov(np.diag(np.array([1, 2, 3])))
>>> S.invert()
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
>>> S = sandy.CategoryCov(np.diag(np.array([0, 2, 3])))
>>> S.invert()
0 1 2
0 0.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
>>> S = sandy.CategoryCov(np.diag(np.array([0, 2, 3])))
>>> S.invert(rows=1)
0 1 2
0 0.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
"""
index = self.data.index
columns = self.data.columns
M_nonzero_idxs, M_reduce = reduce_size(self.data)
cov = sps.csc_matrix(M_reduce.values)
rows_ = cov.shape[0] if rows is None else rows
data = sparse_tables_inv(cov, rows=rows_)
M_inv = restore_size(M_nonzero_idxs, data, len(self.data))
M_inv = M_inv.reindex(index=index, columns=columns).fillna(0)
return self.__class__(M_inv)
def log2norm_cov(self, mu):
"""
Transform covariance matrix to the one of the underlying normal
distribution.
Parameters
----------
mu : iterable
The desired mean values of the target lognormal distribution.
Returns
-------
`CategoryCov` of the underlying normal covariance matrix
Examples
--------
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> cov.log2norm_cov(pd.Series(np.ones(cov.data.shape[0]), index=cov.data.index))
A B C
A 2.19722e+00 1.09861e+00 1.38629e+00
B 1.09861e+00 2.39790e+00 1.60944e+00
C 1.38629e+00 1.60944e+00 2.07944e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = pd.Series([1, 2, .5], index=["A", "B", "C"])
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = [1, 2, .5]
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = np.array([1, 2, .5])
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
Notes
-----
        .. note:: Reference for the equation is 10.1016/j.nima.2012.06.036
.. math::
$$
cov(lnx_i, lnx_j) = \ln\left(\frac{cov(x_i,x_j)}{<x_i>\cdot<x_j>}+1\right)
$$
"""
mu_ = np.diag(1 / pd.Series(mu))
mu_ = pd.DataFrame(mu_, index=self.data.index, columns=self.data.index)
return self.__class__(np.log(self.sandwich(mu_).data + 1))
def log2norm_mean(self, mu):
"""
        Transform mean values to the mean values of the underlying normal
distribution.
Parameters
----------
mu : iterable
The target mean values.
Returns
-------
        `pd.Series` of the underlying normal distribution mean values
Examples
--------
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = pd.Series(np.ones(cov.data.shape[0]), index=cov.data.index)
>>> cov.log2norm_mean(mu)
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> cov.log2norm_mean([1, 1, 1])
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = np.ones(cov.data.shape[0])
>>> cov.log2norm_mean(mu)
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
Reindexing example
"""
mu_ = pd.Series(mu)
mu_.index = self.data.index
return np.log(mu_**2 / np.sqrt(np.diag(self.data) + mu_**2))
def sampling(self, nsmp, seed=None, rows=None, pdf='normal',
tolerance=None, relative=True):
"""
Extract perturbation coefficients according to chosen distribution with
covariance from given covariance matrix. See note for non-normal
distribution sampling.
The samples' mean will be 1 or 0 depending on `relative` kwarg.
Parameters
----------
nsmp : `int`
number of samples.
seed : `int`, optional, default is `None`
seed for the random number generator (by default use `numpy`
            default pseudo-random number generator).
rows : `int`, optional, default is `None`
option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
pdf : `str`, optional, default is 'normal'
random numbers distribution.
Available distributions are:
* `'normal'`
* `'uniform'`
* `'lognormal'`
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
relative : `bool`, optional, default is `True`
flag to switch between relative and absolute covariance matrix
handling
* `True`: samples' mean will be 1
* `False`: samples' mean will be 0
Returns
-------
`sandy.Samples`
object containing samples
Notes
-----
.. note:: sampling with uniform distribution is performed on
diagonal covariance matrix, neglecting all correlations.
.. note:: sampling with lognormal distribution gives a set of samples
with mean=1 as lognormal distribution can not have mean=0.
Therefore, `relative` parameter does not apply to it.
Examples
--------
Draw 3 sets of samples using custom seed:
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(3, seed=11)
0 1
0 -7.49455e-01 -2.13159e+00
1 1.28607e+00 1.10684e+00
2 1.48457e+00 9.00879e-01
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(3, seed=11, rows=1)
0 1
0 -7.49455e-01 -2.13159e+00
1 1.28607e+00 1.10684e+00
2 1.48457e+00 9.00879e-01
>>> sample = sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(1000000, seed=11)
>>> sample.data.cov()
0 1
0 9.98662e-01 3.99417e-01
1 3.99417e-01 9.98156e-01
Small negative eigenvalue:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(3, seed=11, tolerance=0)
0 1
0 2.74945e+00 5.21505e+00
1 7.13927e-01 1.07147e+00
2 5.15435e-01 1.64683e+00
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, tolerance=0).data.cov()
0 1
0 9.98662e-01 -1.99822e-01
1 -1.99822e-01 2.99437e+00
Sampling with different `pdf`:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(3, seed=11, pdf='uniform', tolerance=0)
0 1
0 -1.07578e-01 2.34960e+00
1 -6.64587e-01 5.21222e-01
2 8.72585e-01 9.12563e-01
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(3, seed=11, pdf='lognormal', tolerance=0)
0 1
0 3.03419e+00 1.57919e+01
1 5.57248e-01 4.74160e-01
2 4.72366e-01 6.50840e-01
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0).data.cov()
0 1
0 1.00042e+00 -1.58806e-03
1 -1.58806e-03 3.00327e+00
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0).data.cov()
0 1
0 1.00219e+00 1.99199e-01
1 1.99199e-01 3.02605e+00
`relative` kwarg usage:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='normal', tolerance=0, relative=True).data.mean(axis=0)
0 1.00014e+00
1 9.99350e-01
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='normal', tolerance=0, relative=False).data.mean(axis=0)
0 1.41735e-04
1 -6.49679e-04
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0, relative=True).data.mean(axis=0)
0 9.98106e-01
1 9.99284e-01
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0, relative=False).data.mean(axis=0)
0 -1.89367e-03
1 -7.15929e-04
dtype: float64
        Lognormal distribution sampling independence from `relative` kwarg
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0, relative=True).data.mean(axis=0)
0 9.99902e-01
1 9.99284e-01
dtype: float64
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0, relative=False).data.mean(axis=0)
0 9.99902e-01
1 9.99284e-01
dtype: float64
"""
dim = self.data.shape[0]
pdf_ = pdf if pdf != 'lognormal' else 'normal'
y = sample_distribution(dim, nsmp, seed=seed, pdf=pdf_) - 1
y = sps.csc_matrix(y)
# the covariance matrix to decompose is created depending on the chosen
# pdf
if pdf == 'uniform':
to_decompose = self.__class__(np.diag(np.diag(self.data)))
elif pdf == 'lognormal':
ones = np.ones(self.data.shape[0])
to_decompose = self.log2norm_cov(ones)
else:
to_decompose = self
L = sps.csr_matrix(to_decompose.get_L(rows=rows,
tolerance=tolerance))
samples = pd.DataFrame(L.dot(y).toarray(), index=self.data.index,
columns=list(range(nsmp)))
if pdf == 'lognormal':
# mean value of lognormally sampled distributions will be one by
            # default
samples = np.exp(samples.add(self.log2norm_mean(ones), axis=0))
elif relative:
samples += 1
return sandy.Samples(samples.T)
@classmethod
def from_var(cls, var):
"""
Construct the covariance matrix from the variance vector.
Parameters
----------
var : 1D iterable
Variance vector.
Returns
-------
`CategoryCov`
Object containing the covariance matrix.
Example
-------
>>> S = pd.Series(np.array([0, 2, 3]), index=pd.Index([1, 2, 3]))
>>> cov = sandy.CategoryCov.from_var(S)
>>> cov
1 2 3
1 0.00000e+00 0.00000e+00 0.00000e+00
2 0.00000e+00 2.00000e+00 0.00000e+00
3 0.00000e+00 0.00000e+00 3.00000e+00
>>> assert type(cov) is sandy.CategoryCov
>>> S = sandy.CategoryCov.from_var((1, 2, 3))
>>> S
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 2.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 3.00000e+00
>>> assert type(S) is sandy.CategoryCov
>>> assert type(sandy.CategoryCov.from_var([1, 2, 3])) is sandy.CategoryCov
"""
var_ = pd.Series(var)
cov_values = sps.diags(var_.values).toarray()
cov = pd.DataFrame(cov_values,
index=var_.index, columns=var_.index)
return cls(cov)
@classmethod
def from_stdev(cls, std):
"""
Construct the covariance matrix from the standard deviation vector.
Parameters
----------
std : `pandas.Series`
Standard deviations vector.
Returns
-------
`CategoryCov`
Object containing the covariance matrix.
Example
-------
>>> S = pd.Series(np.array([0, 2, 3]), index=pd.Index([1, 2, 3]))
>>> cov = sandy.CategoryCov.from_stdev(S)
>>> cov
1 2 3
1 0.00000e+00 0.00000e+00 0.00000e+00
2 0.00000e+00 4.00000e+00 0.00000e+00
3 0.00000e+00 0.00000e+00 9.00000e+00
>>> assert type(cov) is sandy.CategoryCov
>>> S = sandy.CategoryCov.from_stdev((1, 2, 3))
>>> S
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 4.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 9.00000e+00
>>> assert type(S) is sandy.CategoryCov
>>> assert type(sandy.CategoryCov.from_stdev([1, 2, 3])) is sandy.CategoryCov
"""
std_ = pd.Series(std)
var = std_ * std_
return cls.from_var(var)
@classmethod
def from_stack(cls, data_stack, index, columns, values, rows=10000000,
kind='upper'):
"""
Create a covariance matrix from a stacked dataframe.
Parameters
----------
data_stack : `pd.Dataframe`
Stacked dataframe.
index : 1D iterable, optional
Index of the final covariance matrix.
columns : 1D iterable, optional
Columns of the final covariance matrix.
values : `str`, optional
Name of the column where the values are located.
rows : `int`, optional
Number of rows to take into account into each loop. The default
is 10000000.
kind : `str`, optional
Select if the stack data represents upper or lower triangular
            matrix. The default is 'upper'.
Returns
-------
`sandy.CategoryCov`
            Covariance matrix.
Examples
--------
If the stack data represents the covariance matrix:
>>> S = pd.DataFrame(np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]]))
>>> S = S.stack().reset_index().rename(columns = {'level_0': 'dim1', 'level_1': 'dim2', 0: 'cov'})
>>> S = S[S['cov'] != 0]
>>> sandy.CategoryCov.from_stack(S, index=['dim1'], columns=['dim2'], values='cov', kind='all')
dim2 0 1 2
dim1
0 1.00000e+00 1.00000e+00 1.00000e+00
1 1.00000e+00 2.00000e+00 1.00000e+00
2 1.00000e+00 1.00000e+00 1.00000e+00
If the stack data represents only the upper triangular part of the
covariance matrix:
>>> test_1 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT", "MT", "E"], columns=["MAT1", "MT1", "E1"], values='VAL').data
>>> test_1
MAT1 9437
MT1 2 102
E1 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT MT E
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> test_2 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT", "MT", "E"], columns=["MAT1", "MT1", "E1"], values='VAL', rows=1).data
>>> test_2
MAT1 9437
MT1 2 102
E1 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT MT E
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> assert (test_1 == test_2).all().all()
If the stack data represents only the lower triangular part of the
covariance matrix:
>>> test_1 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT1", "MT1", "E1"], columns=["MAT", "MT", "E"], values='VAL', kind="lower").data
>>> test_1
MAT 9437
MT 2 102
E 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT1 MT1 E1
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> test_2 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT1", "MT1", "E1"], columns=["MAT", "MT", "E"], values='VAL', kind="lower", rows=1).data
>>> test_2
MAT 9437
MT 2 102
E 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT1 MT1 E1
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> assert (test_1 == test_2).all().all()
"""
cov = segmented_pivot_table(data_stack, rows=rows, index=index,
columns=columns, values=values)
if kind == 'all':
return cls(cov)
else:
return triu_matrix(cov, kind=kind)
def _gls_Vy_calc(self, S, rows=None):
"""
2D calculated output using
.. math::
$$
S\cdot V_{x_{prior}}\cdot S.T
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`pd.DataFrame`
Covariance matrix `Vy_calc` calculated using
S.dot(Vx_prior).dot(S.T)
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> cov._gls_Vy_calc(S)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
>>> cov._gls_Vy_calc(S, rows=1)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
"""
index = pd.DataFrame(S).index
S_ = pd.DataFrame(S).values
rows_ = S_.shape[0] if rows is None else rows
Vy_calc = sparse_tables_dot_multiple([S_, self.data.values,
S_.T], rows=rows_)
return pd.DataFrame(Vy_calc, index=index, columns=index)
def _gls_G(self, S, Vy_extra=None, rows=None):
"""
2D calculated output using
.. math::
$$
S\cdot V_{x_{prior}}\cdot S.T + V_{y_{extra}}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
Vy_extra : 2D iterable, optional.
2D covariance matrix for y_extra (MXM).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`pd.DataFrame`
Covariance matrix `G` calculated using
S.dot(Vx_prior).dot(S.T) + Vy_extra
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov._gls_G(S, Vy)
0 1
0 6.00000e+00 1.10000e+01
1 1.10000e+01 2.60000e+01
>>> cov._gls_G(S, Vy, rows=1)
0 1
0 6.00000e+00 1.10000e+01
1 1.10000e+01 2.60000e+01
>>> cov._gls_G(S)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
>>> cov._gls_G(S, rows=1)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
"""
# GLS_sensitivity:
Vy_calc = self._gls_Vy_calc(S, rows=rows)
if Vy_extra is not None:
            # Data in an appropriate format
Vy_extra_ = sandy.CategoryCov(Vy_extra).data
index = pd.DataFrame(Vy_extra).index
Vy_extra_ = Vy_extra_.values
Vy_calc = Vy_calc.reindex(index=index, columns=index).fillna(0).values
# Calculations:
Vy_calc = sps.csr_matrix(Vy_calc)
Vy_extra_ = sps.csr_matrix(Vy_extra_)
# G calculation
G = Vy_calc + Vy_extra_
G = pd.DataFrame(G.toarray(), index=index, columns=index)
else:
G = Vy_calc
return G
def _gls_G_inv(self, S, Vy_extra=None, rows=None):
"""
2D calculated output using
.. math::
$$
\left(S\cdot V_{x_{prior}}\cdot S.T + V_{y_{extra}}\right)^{-1}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
Vy_extra : 2D iterable, optional
2D covariance matrix for y_extra (MXM).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`pd.DataFrame`
Covariance matrix `G_inv` calculated using
(S.dot(Vx_prior).dot(S.T) + Vy_extra)^-1
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov._gls_G_inv(S, Vy)
0 1
0 7.42857e-01 -3.14286e-01
1 -3.14286e-01 1.71429e-01
>>> cov._gls_G_inv(S, Vy, rows=1)
0 1
0 7.42857e-01 -3.14286e-01
1 -3.14286e-01 1.71429e-01
>>> cov._gls_G_inv(S)
0 1
0 6.25000e+00 -2.75000e+00
1 -2.75000e+00 1.25000e+00
>>> cov._gls_G_inv(S, rows=1)
0 1
0 6.25000e+00 -2.75000e+00
1 -2.75000e+00 1.25000e+00
"""
if Vy_extra is not None:
index = pd.DataFrame(Vy_extra).index
G = self._gls_G(S, Vy_extra=Vy_extra, rows=rows).values
else:
index = pd.DataFrame(S).index
G = self._gls_Vy_calc(S, rows=rows).values
G_inv = sandy.CategoryCov(G).invert(rows=rows).data.values
return pd.DataFrame(G_inv, index=index, columns=index)
def _gls_general_sensitivity(self, S, Vy_extra=None,
rows=None, threshold=None):
"""
Method to obtain general sensitivity according to GLS
.. math::
$$
V_{x_{prior}}\cdot S.T \cdot \left(S\cdot V_{x_{prior}}\cdot S.T + V_{y_{extra}}\right)^{-1}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
Vy_extra : 2D iterable
2D covariance matrix for y_extra (MXM).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
threshold : `int`, optional
threshold to avoid numerical fluctuations. The default is None.
Returns
-------
`GLS`
GLS sensitivity for a given Vy_extra and S.
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov._gls_general_sensitivity(S, Vy)
0 1
0 -2.00000e-01 2.00000e-01
1 2.28571e-01 5.71429e-02
>>> S = pd.DataFrame([[1, 2], [3, 4]], index=[1, 2],columns=[3, 4])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = pd.DataFrame([[1, 0], [0, 1]], index=[1, 2], columns=[1, 2])
>>> cov._gls_general_sensitivity(S, Vy_extra=Vy)
1 2
3 -2.00000e-01 2.00000e-01
4 2.28571e-01 5.71429e-02
>>> cov._gls_general_sensitivity(S, Vy_extra=Vy, rows=1)
1 2
3 -2.00000e-01 2.00000e-01
4 2.28571e-01 5.71429e-02
>>> cov._gls_general_sensitivity(S)
1 2
3 -2.00000e+00 1.00000e+00
4 1.50000e+00 -5.00000e-01
>>> cov._gls_general_sensitivity(S, rows=1)
1 2
3 -2.00000e+00 1.00000e+00
4 1.50000e+00 -5.00000e-01
"""
index = pd.DataFrame(S).columns
columns = pd.DataFrame(S).index
S_ =
|
pd.DataFrame(S)
|
pandas.DataFrame
|
from contextlib import nullcontext as does_not_raise
from functools import partial
import pandas as pd
from pandas.testing import assert_series_equal
from solarforecastarbiter import datamodel
from solarforecastarbiter.reference_forecasts import persistence
from solarforecastarbiter.conftest import default_observation
import pytest
def load_data_base(data, observation, data_start, data_end):
# slice doesn't care about closed or interval label
# so here we manually adjust start and end times
if 'instant' in observation.interval_label:
pass
elif observation.interval_label == 'ending':
data_start += pd.Timedelta('1s')
elif observation.interval_label == 'beginning':
data_end -=
|
pd.Timedelta('1s')
|
pandas.Timedelta
|
"""
Market Data Provider.
This module contains implementations of the DataProvider abstract class, which
defines methods by which market information can be requested and presented.
"""
from abc import abstractmethod
from io import StringIO
import os
import pathlib
import time
from typing import Any, Dict
import pandas as pd
import requests
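# --- Illustrative usage sketch (added for clarity; not part of the original
# --- module). It assumes the AVDataProvider defined below, a valid
# --- SKINTBROKER_AV_API_KEY in the environment and the module's helpers
# --- (e.g. _now()) defined elsewhere in the file; the ticker and cache path
# --- are made-up values.
def _example_provider_usage():
    """Typical read pattern against the DataProvider API."""
    provider = AVDataProvider("SPY", cache="cache")
    last_day = provider.latest()
    minute_bars = provider.intraday(last_day)   # per-minute frame, or None
    daily_bars = provider.daily(last_day)       # per-day frame for that year
    return minute_bars, daily_bars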
class DataProvider:
"""
Abstract class defining the DataProvider API.
"""
@abstractmethod
def intraday(self, day: pd.Timestamp):
"""
Gets the intraday data for a given day.
"""
@abstractmethod
def daily(self, year: pd.Timestamp):
"""
Gets the yearly data for a given +year+.
"""
@abstractmethod
def weekly(self):
"""
        Returns a frame containing all weekly data.
"""
@abstractmethod
def monthly(self):
"""
Returns a frame containing all monthly data.
"""
@abstractmethod
def first(self) -> pd.Timestamp:
"""
Returns the earliest date for which all types of data are available.
"""
@abstractmethod
def latest(self) -> pd.Timestamp:
"""
Returns the latest date for which all types of data are available.
"""
def access_all(self):
"""
Simulates accesses of all kinds. Designed to allow caching
implementations to perform all of their caching up front.
"""
class AVDataProvider(DataProvider):
"""
An implementation of DataProvider which uses the AlphaVantage API.
"""
def __init__(self, ticker: str, *,
reqs_per_minute: int = 5, cache: str = "cache",
local_cache_size: int = 10,
**kwargs: Dict[str, Any]):
"""
Init function.
+reqs_per_minute+ is the number of requests allowed per minute.
+ticker+ provides the ticker symbol for the underlying FD.
+cache+ provides a directory which the DataProvider can use to
organize data.
+local_cache_size+ is the total number of entries to keep on-hand to
speed up repeated accesses.
NOTE: This object assumes it is the only user of the API key at any
given time, and will attempt the maximum number of accesses possible.
"""
self.ticker = ticker
self.reqs_per_minute = reqs_per_minute
self.cache = pathlib.Path(cache)
self.local_cache_size = local_cache_size
self._calls = []
self._local_cache = {}
self._local_cache_history = []
# Ensure the cache is suitable
if self.cache.exists() and not self.cache.is_dir():
raise RuntimeError("Cache must be a directory")
self.cache.mkdir(exist_ok=True, parents=True)
# Get AlphaVantage API key
self.api_key = os.environ.get("SKINTBROKER_AV_API_KEY")
if not self.api_key:
raise RuntimeError("No AlphaVantage API key detected - please set "
"SKINTBROKER_AV_API_KEY")
def _check_local_cache(self, filename: pathlib.Path):
"""
Checks for data associated with a given +filename+ in the local cache.
If found, return it, else return None.
"""
if str(filename) in self._local_cache:
cache_entry = self._local_cache[str(filename)]
if len(self._local_cache) == self.local_cache_size:
self._local_cache_history.remove(str(filename))
self._local_cache_history.append(str(filename))
return cache_entry
return None
def _add_local_cache(self, filename: pathlib.Path, frame: pd.DataFrame):
"""
Adds a +frame+ associated with a given +filename+ to the local cache.
If the cache is full, pops off the least recently accessed entry.
"""
# If necessary, purge the oldest item from the cache
if len(self._local_cache) == self.local_cache_size:
old_name = self._local_cache_history.pop(0)
del self._local_cache[old_name]
self._local_cache[str(filename)] = frame
self._local_cache_history.append(str(filename))
def intraday(self, day: pd.Timestamp):
"""
Gets the intraday data for a given day.
"""
# TODO handle today data
# First, check if the data is already cached
cache_dir = self.cache/self.ticker/str(day.year)/str(day.month)
csv = cache_dir/f"{day.day}_per_minute.csv"
data = self._check_local_cache(csv)
if data is not None:
return data
if cache_dir.exists() and csv.exists():
frame = pd.read_csv(csv, parse_dates=[0],
infer_datetime_format=True,
index_col='time')
self._add_local_cache(csv, frame)
return frame
# Otherwise, download it. Intraday data is divided into 30-day
# segments, so first determine just how far back to look.
days = (_now().floor('d') - day.floor('d')).days - 1
month = (days // 30) % 12 + 1
year = (days // 360) + 1
params = {"function": "TIME_SERIES_INTRADAY_EXTENDED",
"interval": "1min",
"symbol": self.ticker,
"slice": f"year{year}month{month}"}
request_frame = self._api_request(**params)
if request_frame.empty:
return None
# Cache all downloaded data - no point in wasting queries!
grouper = pd.Grouper(freq='D')
for date, group in request_frame.groupby(grouper):
date_dir = self.cache/self.ticker/str(date.year)/str(date.month)
date_csv = date_dir/f"{date.day}_per_minute.csv"
if not date_csv.exists():
date_dir.mkdir(exist_ok=True, parents=True)
group.to_csv(date_csv, index_label='time')
# Try again. If there's still no data, there probably isn't any.
if csv.exists():
frame = pd.read_csv(csv, parse_dates=[0],
infer_datetime_format=True,
index_col='time')
self._add_local_cache(csv, frame)
return frame
return None
def daily(self, year: pd.Timestamp):
"""
Gets the yearly data for a given +year+.
"""
# First, check if the data is already cached
now = _now()
cache_dir = self.cache/self.ticker/str(year.year)
csv = cache_dir/"per_day.csv"
data = self._check_local_cache(csv)
if data is not None:
return data
if cache_dir.exists() and csv.exists():
frame = pd.read_csv(csv, parse_dates=[0],
infer_datetime_format=True,
index_col='time')
            # Return the cached frame unless it is this year's data that
            # isn't up to date for today - in that case, update anyway.
            if year.year != now.year or \
               frame.index[0].dayofyear == now.dayofyear:
self._add_local_cache(csv, frame)
return frame
# Update from remote
params = {"function": "TIME_SERIES_DAILY",
"symbol": self.ticker,
"outputsize": "full"}
request_frame = self._api_request(**params)
# Cache all returned data
grouper = pd.Grouper(freq='Y')
for date, group in request_frame.groupby(grouper):
date_dir = self.cache/self.ticker/str(date.year)
date_csv = date_dir/"per_day.csv"
# If the CSV is missing OR it's this year, then cache
if not date_csv.exists() or date.year == now.year:
date_dir.mkdir(exist_ok=True, parents=True)
group.to_csv(date_csv, index_label='time')
# Try again. If there's still no data, there probably isn't any.
if csv.exists():
frame = pd.read_csv(csv, parse_dates=[0],
infer_datetime_format=True,
index_col='time')
self._add_local_cache(csv, frame)
return frame
return None
def weekly(self):
"""
Returns a frame containing all weekly data.
"""
# First, check if the data is already cached
now = _now()
cache_dir = self.cache/self.ticker
csv = cache_dir/"per_week.csv"
data = self._check_local_cache(csv)
if data is not None:
return data
if cache_dir.exists() and csv.exists():
frame = pd.read_csv(csv, parse_dates=[0],
infer_datetime_format=True,
index_col='time')
# If the data isn't recent, update
if frame.index[0].week == now.week:
self._add_local_cache(csv, frame)
return frame
# Update from remote
# Set up call parameters
params = {"function": "TIME_SERIES_WEEKLY_ADJUSTED",
"symbol": self.ticker}
request_frame = self._api_request(**params)
# Cache returned data.
if not cache_dir.exists():
cache_dir.mkdir(exist_ok=True, parents=True)
request_frame.to_csv(csv, index_label='time')
# Try again. If there's still no data, there probably isn't any.
if csv.exists():
frame = pd.read_csv(csv, parse_dates=[0],
infer_datetime_format=True,
index_col='time')
self._add_local_cache(csv, frame)
return frame
return None
def monthly(self):
"""
Returns a frame containing all monthly data.
"""
# First, check if the data is already cached
now = _now()
cache_dir = self.cache/self.ticker
csv = cache_dir/"per_month.csv"
data = self._check_local_cache(csv)
if data is not None:
return data
if cache_dir.exists() and csv.exists():
frame = pd.read_csv(csv, parse_dates=[0],
infer_datetime_format=True,
index_col='time')
# If the data isn't recent, update
if frame.index[0].month == now.month:
self._add_local_cache(csv, frame)
return frame
# Update from remote
# Set up call parameters
params = {"function": "TIME_SERIES_MONTHLY_ADJUSTED",
"symbol": self.ticker}
request_frame = self._api_request(**params)
# Cache returned data.
if not cache_dir.exists():
cache_dir.mkdir(exist_ok=True, parents=True)
request_frame.to_csv(csv, index_label='time')
# Try again. If there's still no data, there probably isn't any.
if csv.exists():
frame = pd.read_csv(csv, parse_dates=[0],
infer_datetime_format=True,
index_col='time')
self._add_local_cache(csv, frame)
return frame
return None
def _api_request(self, **kwargs: Dict[str, str]) -> pd.DataFrame:
"""
Performs an API request using the passed parameters. Returns a
DataFrame or None.
"""
# Assemble the query
site = "https://www.alphavantage.co/query?"
params = [f"{key}={val}" for key, val in \
{**kwargs, "apikey": self.api_key, "datatype": "csv"}.items()]
query = "&".join(params)
# Perform call limit bookkeeping, and delay if needed.
if len(self._calls) >= self.reqs_per_minute:
oldest_call = self._calls.pop(0)
to_wait = 60 - (_now() - oldest_call).seconds
if to_wait >= 0:
time.sleep(to_wait + 1)
# Call the API and generate the dataframe
print("Querying: " + site + query)
response = requests.get(site + query)
response.encoding = 'utf-8'
index_label = 'time' if "INTRADAY" in kwargs["function"] \
else 'timestamp'
frame = pd.read_csv(StringIO(response.text), parse_dates=[0],
infer_datetime_format=True,
index_col=index_label)
# Record this call for future checks
self._calls.append(_now())
return frame
def first(self) -> pd.Timestamp:
"""
Returns the earliest date for which all types of data are available.
"""
# Based on the AlphaVantage system, it's reasonable to assume data
# exists for two years back from today. Note that it's entirely
# possible that cached data exists from even earlier, so a future
# extension should search for it.
return _now() - pd.Timedelta(720 - 1, unit='d')
def latest(self) -> pd.Timestamp:
"""
Returns the latest date for which all types of data are available.
"""
# Yesterday is fine
return _now() - pd.Timedelta(1, unit='d')
def access_all(self) -> None:
"""
Simulates accesses of all kinds. Designed to allow caching
implementations to perform all of their caching up front.
"""
# First, handle daily, weekly, and monthly entries for the last 20
# years. As this comes in one immense blob, just access that.
now = _now()
self.monthly()
self.weekly()
self.daily(now)
# Then handle intraday for the last 2 years.
days =
|
pd.date_range(end=now, freq='D', periods=360 * 2 - 1)
|
pandas.date_range
|
"""
@author : <NAME>
@date : 1-10-2021
Ensemble Learning is an often overshadowed and underestimated field of machine learning. Here we provide 2 algorithms
central to the game - random forests and the ensemble/voting classifier. Random Forests are especially fast
with parallel processing to fit multiple decision trees at the same time.
"""
import pandas as pd
import numpy as np
from multiprocessing import cpu_count
from joblib import parallel_backend, delayed, Parallel
import random
import math
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
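# --- Illustrative sketch (added for clarity; not part of the original module).
# --- It shows the majority-vote rule on made-up predictions; RandomForest
# --- below applies the same idea to the outputs of its decision trees.
def _example_majority_vote(predictions_per_tree):
    """predictions_per_tree: list of 1D label sequences, one per classifier."""
    stacked = np.array(predictions_per_tree)      # shape (n_trees, n_samples)
    voted = []
    for column in stacked.T:                      # one column per sample
        values, counts = np.unique(column, return_counts=True)
        voted.append(values[np.argmax(counts)])   # the most common label wins
    return np.array(voted)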
class RandomForest:
"""
Random Forests may seem intimidating but they are super simple. They are just a bunch of Decision Trees that are
trained on different sets of the data. You give us the data, and we will create those different sets. You may choose
for us to sample data with replacement or without, either way that's up to you. Keep in mind that because this is
a bunch of Decision Trees, classification is only supported (avoid using decision trees for regression - it's
range of predictions is limited.) The random forest will have each of its decision trees predict on data and just
choose the most common prediction (not the average.)
Enjoy this module - it's one of our best.
"""
def __init__(self, num_classifiers=20, max_branches=math.inf, min_samples=1, replacement=True, min_data=None):
"""
:param num_classifiers: Number of decision trees you want created.
:param max_branches: Maximum number of branches each Decision Tree can have.
:param min_samples: Minimum number of samples for a branch in any decision tree (in the forest) to split.
:param replacement: Whether or not any of the data points in different chunks/sets of data can overlap.
:param min_data: Minimum number of data there can be in any given data chunk. Each classifier is trained on a
        chunk of data, and if you want to make sure each chunk has 3 points for example you can set min_data = 3. Its
        default is 50% of the amount of data; the None is just a placeholder.
"""
from .decision_trees import DecisionTree
self.DecisionTree = DecisionTree
self.trees = []
self.num_classifiers = num_classifiers
self.max_branches = max_branches
self.min_samples = min_samples
self.replacement = replacement
self.min_data = min_data
def fit(self, x_train, y_train):
"""
:param x_train: 2D training data
:param y_train: 1D training labels
:return:
"""
data, labels = np.array(x_train).tolist(), np.array(y_train).tolist()
num_classifiers = self.num_classifiers
max_branches = self.max_branches
min_samples = self.min_samples
replacement = self.replacement
min_data = self.min_data
# on default set min_data = 50% of your dataset
if not min_data:
min_data = round(0.5 * len(data))
# merge data and labels together [(d1, l1) .. (dN, lN)]
data_and_labels = [
(data_point, label) for data_point, label in zip(data, labels)
]
self.chunk_data, self.chunk_labels = [], []
if replacement:
for classifier in range(num_classifiers):
num_samples = min_data + random.randint(0, len(data) - min_data)
data_and_labels_set = random.sample(data_and_labels, num_samples)
self.chunk_data.append(
[data_point for data_point, _ in data_and_labels_set]
)
self.chunk_labels.append([label for _, label in data_and_labels_set])
else:
"""no replacement just use up all of the data here"""
data_and_labels_df =
|
pd.DataFrame({"data": data, "labels": labels})
|
pandas.DataFrame
|
# Copyright 2019 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Adapted by <NAME> in November,2019 from this Colab notebook:
#https://colab.research.google.com/github/google-research/bert/blob/master/predicting_movie_reviews_with_bert_on_tf_hub.ipynb.
#Changes include
# - Reading our stressor data and parsing it properly
# - reconfiguring the last layer to include N neurons corresponding to N categories
# - correcting the probability output so that it follows [0,1] proper pattern
# - better analysis with confusion matrix
# - exporting to pb format for tensorflow serving api
import os
os.environ['LD_LIBRARY_PATH'] = '/usr/local/cuda-10.0/lib64'
import sys
print(sys.executable)
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from datetime import datetime
import matplotlib.pyplot as plt
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import f1_score,confusion_matrix,classification_report,accuracy_score
import logging
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_colwidth', 1000)
config = tf.ConfigProto()
#config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
#config.gpu_options.visible_device_list="0"
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
import bert
from bert import run_classifier_with_tfhub
from bert import optimization
from bert import tokenization
from bert import modeling
import numpy as np
############ Utils functions ##################
def create_examples_prediction(df):
"""Creates examples for the training and dev sets."""
examples = []
for index, row in df.iterrows():
#labels = row[LABEL_HOT_VECTOR].strip('][').split(', ')
#labels = [float(x) for x in labels]
labels = list(row[label_list_text])
examples.append(labels)
return
|
pd.DataFrame(examples)
|
pandas.DataFrame
|
import math
import pandas as pd
import numpy as np
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.model_selection import cross_val_score  # sklearn.cross_validation was removed; model_selection provides the same API
from sklearn.metrics import mean_squared_error
from sklearn import svm
def get_past_midfielders():
data = pd.read_csv('../resources/merged.csv', sep=',', encoding='utf-8', index_col=0)
model = data[['player_id', 'name', 'season', 'pos', 'round', 'team_rank', 'opponent_team_rank', 'team_pot', 'opp_pot',
'concede_pot', 'opp_concede_pot', 'prev_points', 'form_points', 'total_points',
'long_form', 'ict_form']]
MidfielderModal = model.loc[model['pos'] == 'Defender']
MidfielderModal.drop('pos', axis=1, inplace=True)
MidfielderModal.sort_values(['season', 'round'], ascending=True, inplace=True)
MidfielderModal.to_csv('../resources/predictions/MIDFIELDERS.csv', sep=',', encoding='utf-8')
players = MidfielderModal[8587:]
keys = MidfielderModal['round']
values = pd.cut(MidfielderModal['round'], 3, labels=[1, 2, 3])
dictionary = dict(zip(keys, values))
MidfielderModal['round'] = values
X = MidfielderModal.drop(['total_points', 'season', 'player_id', 'name'], axis=1)
y = MidfielderModal[['total_points']]
X_train = X[:8586]
X_test = X[8587:]
y_train = y[:8586]
y_test = y[8587:]
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
score = regression_model.score(X_test, y_test)
y_pred = regression_model.predict(X_test)
testing = pd.concat([X_test, y_test], 1)
testing['Predicted'] = np.round(y_pred, 1)
testing['Prediction_Error'] = testing['total_points'] - testing['Predicted']
testing['player_id'] = 0
testing['name'] = 0
testing['player_id'] = players.player_id
testing['name'] = players.name
testing['round'] = 34
testing.to_csv('../resources/past/34_MIDS.csv', sep=',', encoding='utf-8')
# get_past_midfielders()
def merge():
one = pd.read_csv('../resources/predictions/30FOR.csv', sep=',', encoding='utf-8', index_col=0)
two = pd.read_csv('../resources/predictions/31FOR.csv', sep=',', encoding='utf-8', index_col=0)
three = pd.read_csv('../resources/predictions/32FOR.csv', sep=',', encoding='utf-8', index_col=0)
four = pd.read_csv('../resources/predictions/33FOR.csv', sep=',', encoding='utf-8', index_col=0)
five = pd.read_csv('../resources/predictions/34FOR.csv', sep=',', encoding='utf-8', index_col=0)
dfarray = [one, two, three, four, five]
MergedData =
|
pd.concat(dfarray)
|
pandas.concat
|
import pandas as pd
import numpy as np
from scipy.stats import mode
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error,r2_score
from sklearn.ensemble import GradientBoostingRegressor
#from sklearn import cross_validation, metrics
pd.options.mode.chained_assignment = None
import joblib  # sklearn.externals.joblib was deprecated and removed; use the standalone joblib package
test = pd.read_csv('Test.csv')
train =
|
pd.read_csv('Train.csv')
|
pandas.read_csv
|
#%%
# ANCHOR IMPORTS
import sys
import pandas as pd, numpy as np
import pickle
import re
from sklearn import feature_extraction , feature_selection
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import Normalizer
from tqdm.autonotebook import trange, tqdm
import swifter
# Libraries for feature engineering.
import string
from collections import Counter # not necessary?
#from nnsplit import NNSplit
import spacy# .tokenizer.tokenize
from spellchecker import SpellChecker
# Other neat features.
from nltk.metrics.distance import edit_distance
from lexicalrichness import LexicalRichness
import syllables
import itertools
import textstat
# Stats
from scipy.stats import chisquare
#from statistics import mean
#%% Get spacy docs and save them to data to speed up development.
def get_docs(data, text_col='text_clean'):
nlp = spacy.load('en_core_web_sm')
nlp.enable_pipe("senter")
data['docs'] = data[text_col].apply(lambda x: nlp(x))  # fixed typo: tect_col -> text_col
#%%
def listify(series, feature_name=str):
return [{feature_name: x[1]} for x in series.items()]
#%%
# Extract Baseline feature
# Character trigrams (morphological/lexical/semantic?).
def ngrams(train, test, params):
"""Extract character ngrams.
Args:
train (list): list of texts to fit the vectorizer.
test (list): list of texts to transform to feature space.
params (dict): parameters for the vectorizer construction
Returns:
tuple: the fitted CountVectorizer and the train/test feature matrices.
"""
vectorizer = CountVectorizer(lowercase=params['ngrams']['lowercase'],
ngram_range=params['ngrams']['size'], # experiment with ranges, e.g. ngram_range=(3,3)
analyzer=params['ngrams']['type'], #, also try "char_wb"
max_features=params['ngrams']['max_vocab']) # max_features=10000
# fit count vectorizer to preprocessed tweets.
#vectorizer.fit(train)
# Transform into input vectors for train and test data.
train_vectors = vectorizer.fit_transform(train) # using fit_transform due to better implementation.
#train_vectors = vectorizer.transform(train) #.toarray()
test_vectors = vectorizer.transform(test) #.toarray()
# Inspect with vectorizer.get_feature_names() and .toarray()
#inverse = vectorizer.inverse_transform(train)
#feature_names = vectorizer.get_feature_names()
#print(f'Train ({type(train_vectors)}) feature matrix has shape: {train_vectors.shape}')
#print(f'Test ({type(test_vectors)}) feature matrix has shape: {test_vectors.shape}')
#return vectorizer
return vectorizer, train_vectors , test_vectors
#return inverse
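# Hedged usage sketch for ngrams(): the params dict below mirrors the keys the
# function reads; the texts and settings are made up for illustration.
def _example_ngrams():
    toy_params = {'ngrams': {'lowercase': True, 'size': (3, 3), 'type': 'char', 'max_vocab': 1000}}
    vec, x_train, x_test = ngrams(['the cat sat', 'the dog ran'], ['a cat ran'], toy_params)
    # both are sparse count matrices with the same vocabulary (columns)
    return x_train.shape, x_test.shape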
#%% ANCHOR EXTRACT LIWC
def parse_liwc(file, **args):
"""Parse a (left) aligned version of the LIWC lexicon.
Args:
file (str): filepath to lexcion (excel).
Returns:
dict: LIWC categories mapped to their term lists.
"""
df = pd.read_excel(file, skiprows=2)
# Handling merged columns in file
### Adapted from https://stackoverflow.com/a/64179518 ###
df.columns = df.columns.to_series()\
.replace('Unnamed:\s\d+', np.nan, regex=True).ffill().values
# Multindex to represent multiple columns for some categories.
df.columns = pd.MultiIndex.from_tuples([(x, y)for x, y in
zip(df.columns, df.columns.to_series().groupby(level=0).cumcount())])
### Accessed 26-04-2021 ###
# d = data.to_dict(orient='list')
### Adapted from https://stackoverflow.com/a/50082926
# dm = data.melt()
# data = dm.set_index(['variable', dm.groupby('variable').cumcount()]).sort_index()['value'].unstack(0)
### Accessed 26-04-2021 ###
# Concat the terms by column.
# d = dict()
#d = {column: value for key, value in dd.items()}
# for ki, wl in dd.items():
# nl = []
# k, i = ki
# # for w in wl:
# # if w not in nl:
# # d[k].append(wl)
# if k in d:
# d[k].append(wl)
# else:
# d[k] = wl
### Solution from https://stackoverflow.com/a/48298420 ###
# TODO experiment with not sorting the index? or reesrorting columns to mach the multiindex or just original df.columns.
df = df.stack().sort_index(level=1).reset_index(drop=True)
### Accessed 26-04-2021 ###
# Check that merged columns have the right number of terms.
# sum(isinstance(x, str) for x in terms['Funct'])
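# The returned dict maps each LIWC category to its term list, e.g. (illustrative)
# {'Funct': ['a', 'the', ...], 'Posemo': ['happy*', ...], ...}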
return df.to_dict(orient='list')
#%%
# Extract LIWC matches (lexical/semantic)
def liwc_match(parsed, d, extract=False, text_col='text_clean'):
"""Search a corpus for matches against LIWC (2007) categories.
Args:
parsed (DataFrame): a pandas df with the all categories of LIWC prepared.
d (str): a filepath to a pickle file with a corpus to search.
extract (bool, optional): Switch specifying whether or not to return a Dict for feature extraction or feature inspection/analysis. Defaults to False.
Returns:
dict: a dict with {liwc_cat1...n : count} for each datapoint in the corpus OR a dict with the raw matches, a DataFrame and a Series holding the results of searching the categories against the corpus (absolute counts per datapoint as dict and DF, totals per category as Series), plus the compiled regex patterns.
"""
# load data to search.
# Could do Series.count(regex) or df[clean_text] -> (joined) list?
if isinstance(d, pd.DataFrame) == False: # the analysis case: d is a filepath to a pickled corpus.
data = pd.read_pickle(d)
text = data[text_col] # search the whole text column, mirroring the extract case below.
if extract == True: # The extract case
data = d
text = data[text_col]
# Dict for search results.
results = dict()
pats = dict() # save patterns to dict for debugging.
# Loop through category-termlist pairs.
for cat, terms in tqdm(parsed.items()):
# Remove nans from term lists.
terms = [term.strip(' ') for term in terms if isinstance(term, str)]
# Compile re pattern from term list.
#pat = re.compile('|'.join(terms), flags=re.MULTILINE)
#pat = re.compile('|'.join(
# [r'\b' + t[:-1] if t.endswith('*') else r'\b' + t + r'\b' for t in #terms]))
### Adapted from https://stackoverflow.com/a/65140193 ###
pat = re.compile('|'.join([r'\b' + t[:-1] + r'\w*' if t.endswith('*') else r'\b' + t + r'\b' for t in terms]) , flags=re.MULTILINE | re.IGNORECASE)
### Accessed 27-04-2021 ###
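# Illustrative (hypothetical terms): ['happi*', 'sad'] compiles to
# r'\bhappi\w*|\bsad\b', i.e. a trailing '*' becomes a prefix wildcard.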
pats[cat] = pat
#i, char = enumerate(j_terms)
# for term in terms:
# i = 0
# try:
# pat = re.compile(term)
# #print(pat, counter,'\n')
# i +=1
# except:
# print('error here:\n'.upper(),pat, i)
# Aggregate matches per category into dict. storing tweet id's preserved in the source data.
#results[cat] = pat.finditer(text.values)
# For that, join values into list of lists -> re.match -> see below
# results[cat][re.match(pat)] = re.finditer(pat, row_list)
# if extract == True: You can't normalize since this isn't tokenized.
# results[cat] = text.apply(lambda x: x.str.count(pat) / len(x))
# else:
results[cat] = text.str.count(pat)
#results[cat] = text.swifter.apply(lambda x: re.finditer(pat, x))
# Store results in DataFrame
df_results = pd.DataFrame.from_dict(results)
# Totals per category
df_totals = df_results.sum().sort_values(ascending=False)
if extract == True:
# Export results to {index : {cat : count}...} for easy vectorization.
results_per_row = df_results.to_dict(orient='records') # or orient='index'? -> DictVectorizer
return results_per_row
return {'results' :
{'matches_dict' : results,
'matches_df' : df_results,
'matches_total': df_totals
},
'regex_pats' : pats
}
#%%
def norm_freqs(data, expression, count_name=str, normalize=True, analyze=True):
"""Get frequencies (normalized = optional) of a regex pattern in a Series with one or more strings.
Args:
data (DataFrame): a dataframe with texts to extract frequencies from.
expression (re.compile): a regex pattern to count occurrences of in each text.
count_name (str, optional): a name for the counted feature. Defaults to str.
normalize (bool, optional): whether to divide each count by the length of the text. Defaults to True.
Returns:
list: list of dicts with key = frequency name, value = frequency.
"""
# List to store frequencies
# freqList = list()
# Loop through each entry in the list of strings.
# for e in stringList:
# # Join to a regular string
# text = ' '.join(e)
# # Construct a dict for each entry with freuncies.
# c = {count_name : len([char for char in text if char in expression])}
# Get frequencies of a regex in a pandas column, normalize if set to True.
c = data.apply(lambda x: len(re.findall(
expression, x))/len(x) if normalize == True else len(re.findall(expression, x)))
### Adapted from https://stackoverflow.com/a/45452966 ###
# Cast frequencies Series to list of dicts.
cList = [{count_name: x[1]} for x in c.items()]
### Accessed 10-05-2021 ###
if analyze == True:
return cList
else:
return c
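# Hedged usage sketch for norm_freqs(): counts digit characters per character of
# each toy string; the example data and feature name are made up for illustration.
def _example_norm_freqs():
    toy = pd.Series(['call me at 555', 'no numbers here'])
    digit_pat = re.compile(r'\d')
    # analyze=True returns a list of dicts, analyze=False returns the raw Series.
    return norm_freqs(toy, digit_pat, count_name='digit_freq', normalize=True, analyze=True)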
def binary_freq(data, expression, feature_name=str, analyze=True):
"""Search data for occurrences of a binary feature as a regex.
Args:
data (pd.Series): a series with text instances.
expression (re.compile): a regex or string to search for.
feature_name (str, optional): a name for the feature to extract. Defaults to str.
Returns:
list: a list with a dict mapping feature name to 1 or 0 (true/false) based on occurrence in texts.
"""
b = data.str.contains(expression).astype(int) # cast bools to 0/1
if analyze == True:
bList = [{feature_name: x[1]} for x in b.items()]
return bList
else:
return b
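# Hedged usage sketch for binary_freq(): flags which toy strings contain repeated
# exclamation marks (example data and feature name are made up).
def _example_binary_freq():
    toy = pd.Series(['wow!!!', 'calm text'])
    return binary_freq(toy, re.compile(r'!{2,}'), feature_name='excl_rep', analyze=True)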
#%% ANCHOR extract character and word level features
# Extract character-level features (lexical/morphological).
def get_cl(data, text_col='text_clean', analyze=True):
# 0. Cast data text col .to_list()
# 1. Normalized punctation frequency.
# # Using pandas instead of lists + counter + dicts.
# df_results = pd.DataFrame({'text': textList})
# #p_pat = re.compile(r'[!"\$%&\'()*+,\-.\/:;=#@?\[\\\]^_`{|}~]*')
# p_pat = re.compile(re.escape(string.punctuation))
# df_results['punct'] = df_results.text.str.count(p_pat)
# the whole series
#train['text_clean'].str.count(p_pat)
df_punc_freq = data[text_col].apply(lambda x: len([char for char in ' '.join(x) if char in string.punctuation]) / len(' '.join(x)))
#return punc_freq, df_punc_freq
#df_punc_freq = pd.DataFrame.from_records(punc_freq)
# Add to cl dict.
#cl_results['punc_freq'] = punc_freq
#2. Specific characters (also normalized)
# 2.1 digits
d_pat = re.compile(r'\d' , re.M)
df_digits = norm_freqs(data[text_col], d_pat, count_name='digit_freq',normalize=True, analyze=False)
#return df_digits
# 2.2 Whitespace chars.
ws_pat = re.compile(r' ', re.M) # NOTE just using actual whitespace instead of \s
df_whitespaces = norm_freqs(data[text_col], ws_pat, count_name='whitespace_freq', normalize=True, analyze=False)
# 2.3 tab characters NOTE Doesn't occur in either corpus.
# tab_pat = re.compile(r'\t', re.M)
# tabs = norm_freqs(data[text_col], tab_pat, count_name='tab_freqs', normalize=True)
# 2.4 line break characters
br_pat = re.compile(r'[\r\n\f]', re.M)
df_lbreaks = norm_freqs(data[text_col], br_pat, count_name='line_break_freq', normalize=True, analyze=False)
# 2.5 Uppercase chars (per all chars)
up_pat = re.compile(r'[A-Z]', re.M) # Decide whether to be greedy about *all* uppercase chars or to be lazy (below). Also, @USER mentions are counted now. Can be excluded with \b(?!USER\b)[A-Z]. Try doing [^a-z\W] - caret negates the range of chars.
#up_pat = re.compile(r'(?<![a-z])*[A-Z](?![a-z])*' , re.M) # Only count chars if they are not a one-off in the beginning of words.
df_upchars = norm_freqs(data[text_col], up_pat, count_name= 'upper_char_freq', normalize=True, analyze=False)
# 2.6 Special chars other than punctuation. NOTE Doesn't make much sense when using a full punctuation set..
spc_pat = re.compile(r"[^a-z \.,!?':;\s]", re.M)
df_spc = norm_freqs(data[text_col], spc_pat, count_name="special_characters", analyze=False)
#3. Repeated characters (binary features) # NOTE if you want counts of each repeated char, consider just defining it with regexes and then using norm_freqs, normalize=False?
# 3.1 question marks
quest_pat = re.compile(r'\?{2,}', re.M)
df_rep_quest = binary_freq(data[text_col] , quest_pat, feature_name='quest_rep', analyze=False)
# 3.2 periods (ellipsis)
per_pat = re.compile(r'\.{2,}', re.M)
df_rep_per = binary_freq(data[text_col] , per_pat, feature_name='period_rep', analyze=False)
# 3.3 exclamation marks
excl_pat = re.compile(r'!{2,}', re.M)
df_rep_excl = binary_freq(data[text_col] , excl_pat, feature_name='excl_rep', analyze=False)
# 4 Contains equal signs
eq_pat = re.compile(r'=', re.M)
df_equals = binary_freq(data[text_col] , eq_pat , feature_name='equals', analyze=False)
# 5 Quotes in chars
#quotes = data[text_col].apply(lambda x: len(re.findall(quot_pat, x)) / len(x)) # per character --- works.
#quotes_char = [{'quotes' : x[1]} for x in qoutes.items()]
if analyze == True:
#punc_freq = listify(df_punc_freq, feature_name='char_punc_freq') # new Alternative to punc_freq with dict comprehension.
textList = data[text_col].to_list()
### Old approach to punc_freqs for analysis.
cl_results = dict() # dict to store results.
punc_freq = list()
for e in textList:
text = ' '.join(e)
# Build dict with counts of all punct characters.
# The first c example does it per punctuation character, the second for all.
# Each count is normalized by total number of chars in the each string.
# NOTE not using regexes here. Single quotes/apostrophes/contractions are counted as well.
#c = {char:count/len(text) for char, count in Counter(text).items() #if char in string.punctuation}
# This should generalize to regex matches.
c = {'char_punc_freq': len([char for char in text if char in string.punctuation])/len(text)}
punc_freq.append(c)
digits = norm_freqs(data[text_col], d_pat, count_name='digit_freq',normalize=True)
whitespaces = norm_freqs(data[text_col], ws_pat, count_name='whitespace_freq', normalize=True)
lbreaks = norm_freqs(data[text_col], br_pat, count_name='line_break_freq', normalize=True)
upchars = norm_freqs(data[text_col], up_pat, count_name= 'upper_char_freq', normalize=True)
spc = norm_freqs(data[text_col], spc_pat, count_name="special_characters")
rep_quest = binary_freq(data[text_col] , quest_pat, feature_name='quest_rep')
rep_per = binary_freq(data[text_col] , per_pat, feature_name='period_rep')
rep_excl = binary_freq(data[text_col] , excl_pat, feature_name='excl_rep')
equals = binary_freq(data[text_col] , eq_pat , feature_name='equals')
# Store results
cl_results['char_punc_freq'] = punc_freq
cl_results['digit_freq'] = digits
cl_results['whitespace_freq'] = whitespaces
#cl_results['tab_freq'] = tabs does not occur in either corpus.
cl_results['linebreak_freq'] = lbreaks
cl_results['uppercased_char_freq'] = upchars
cl_results['special_char_freq'] = spc
cl_results['repeated_questionmark'] = rep_quest
cl_results['repeated_periods'] = rep_per
cl_results['repeated_exclamation'] = rep_excl
cl_results['contains_equals'] = equals
return cl_results #punc_freq # (punc_freq , cl_results)
# Store results as df for much easier vectorization...
else:
cl_results_df = pd.DataFrame()
cl_results_df['char_punc_freq'] = df_punc_freq #✅
#pd.concat(cl_results_df)
# Store results
cl_results_df['digit_freq'] = df_digits #✅
cl_results_df['whitespace_freq'] = df_whitespaces #✅
#cl_results['tab_freq'] = tabs does not occur in either corpus.
cl_results_df['linebreak_freq'] = df_lbreaks #✅
cl_results_df['uppercased_char_freq'] = df_upchars #✅
cl_results_df['special_char_freq'] = df_spc #✅
cl_results_df['repeated_questionmark'] = df_rep_quest #✅
cl_results_df['repeated_periods'] = df_rep_per #✅
cl_results_df['repeated_exclamation'] = df_rep_excl #✅
cl_results_df['contains_equals'] = df_equals #✅
return cl_results_df
#%%
# Debugging
# test_df = train.iloc[:50,:]
# test = get_cl(test_df, text_col='text_clean', analyze=False)
# Extract word-level features (lexical/morphological)
def get_wl(data, text_col='text_clean', analyze=False, docs=[]):
# SpaCy pipe for rule based sentence splitting.
#blank_nlp = spacy.blank('en') # spacy.load('en_core_web_sm')
# sentencizer = blank_nlp.add_pipe("sentencizer")
# morphologizer = blank_nlp.add_pipe('morphologizer')
# blank_nlp.initialize() #
# print(nlp.pipe_names)
print('Configuring spacy for word level')
nlp = spacy.load('en_core_web_sm', disable=["lemmatizer", 'ner'])
# disable parser in favor of senter and sentencizer due to speed https://spacy.io/models
nlp.disable_pipe("parser")
nlp.enable_pipe("senter")
# Load spellchecker
spell = SpellChecker()
# load exceptions to spellchecker (Twitter, covid specifc)
try:
spell.word_frequency.load_text_file('./utils/spell_additions.txt')
except:
pass
# 1 Get lengths (total/avg words, sentence)
# rewrite features as attributes of Lengths objects?
# class Lengths:
# def __init__(self, first_feat, second_feat):
# pass
#textList = data[text_col].to_list()
wl_results = dict()
# print('TOKENIZING WORD-LEVEL FEATURES')
# data to docs
if len(docs) <= 0:
docs = data[text_col].swifter.apply(lambda x: nlp(x))
#assert len(docs) == len(data[text_col])
# get list of sentences.
sents_c = docs.apply(lambda x: [s for s in x.sents])
# Words only (including numbers and @mentions)
sents_w = docs.apply(lambda x: [[t.text for t in s if\
t.is_punct == False and
t.is_space == False]\
for s in x.sents])
# list of *word* tokens in entire tweet.
toks = docs.apply(lambda x: [t.text for t in x if t.is_punct == False and\
t.is_space == False]) # could have used data['tokens_clean]
# alphabetic tokens only. (for spell checking)
toks_alpha = docs.apply(lambda x: [t.text for t in x if t.is_alpha == True])
# Debugging getting empty lists of alphabetic tokens.
#return pd.DataFrame({'tokens' : toks, 'alpha_tokens': toks_alpha})
toks_morph = docs.apply( lambda x: [t for t in x if t.is_alpha == True])
# print('\n GETTING WORD-LEVEL FEATURES')
# 1.1 total length of tweet in words
# c = {'total_words' : int}
# for doc in docs:
w_total_series = toks.map(len)
# 1.2 avg word length
awl = toks.apply(lambda x: sum(len(w) for w in x) / len(x))
# build dict with keys from list contained in feature_params value for lexical features > word_level. Check if they are there and populate them with the dicts below accordingly. Else don't.
# 1.3.1 avg sentence length (words)
asl_w = sents_w.apply(lambda x: sum(len(s) for s in x) / len(x))
# 1.3.2 avg sentence length (characters)
#asl_c = apply(lambda x: sum([len(''.join(s.text)) for s in x]))
asl_c = sents_c.apply(lambda x: sum(len(''.join(s.text)) for s in x) / len(x))
# 2.1 number of uppercased words.
uws = toks_alpha.apply(lambda x: len([t for t in x if t.isupper() == True]) / len(x) if len(x) > 0 else 0.0)
# 2.2 number of short words
# use len of token <=3
sws = toks_alpha.apply(lambda x: len([t for t in x if len(t) <=3]) / len(x) if len(x) > 0 else 0.0)
# 2.3 number of elongated words
# use regex \b\w{3,}\b
elw_pat = re.compile(r'(\w)\1{2,}', re.M)
elws = toks_alpha.apply(lambda x: len([t for t in x if elw_pat.search(t)]) / len(x) if len(x) > 0 else 0.0)
# 2.4 number of number-like tokens (both digits and numerals)
nss = docs.apply(lambda x: len([t for t in x if t.like_num == True]) / len(x))
# 2.5 frequency of specific verb tenses
pst = toks_morph.apply(lambda x: [t.morph for t in x if t.morph.get('Tense') == ['Past']]).map(len).divide(toks_alpha.map(len))
prs = toks_morph.apply(lambda x: [t.morph for t in x if t.morph.get('Tense') == ['Pres']]).map(len).divide(toks_alpha.map(len)) #NOTE using series.divide instead for if/else check with regular might give a problem with vectorizers.
adj_pos = toks_morph.apply(lambda x: [t.morph for t in x if t.morph.get('Degree') == ['Pos']]).map(len).divide(toks_alpha.map(len))
adj_c_s = toks_morph.apply(lambda x: [t.morph for t in x if t.morph.get('Degree') == ['Cmp'] or t.morph.get('Degree') == ['Sup']]).map(len).divide(toks_alpha.map(len))
# Here you could add future tense, mood etc.
# 2.6 Frequency of OOV words (according to spaCy model)
# token.is_oov
# 3. Frequencies of emotes/jis.
e = data['emotes'].apply(lambda x: len(x[0] + x[1])).divide(toks.map(len)) # normalized by tokens.
# 4. Non-standard spelling. Reconsider including this. It mostly captures proper names and acronyms if it has to be this fast.
sc = toks_alpha.apply(lambda x: spell.unknown(x)).map(len).divide(toks_alpha.map(len))
# 5. number of quoted words
# NOTE normalized by words (in match / in tweet)
quot_pat = re.compile(r"(\".+?\"|\B'.+?'\B)") # should this be quot_pat = re.compile(r("\".+?\"|\B'.+?'\B")) #
#quotes = data[text_col].apply(lambda x: re.findall(quot_pat, x).split(' ')).map(len).divide(toks_alpha.map(len)) # per word (split on whitespace).
print('Tokenizing quote spans')
quotes = data[text_col].swifter.apply(lambda x:
[t for t in nlp(' '.join(re.findall(quot_pat, x))) if t.text.isalnum()]).map(len).divide(toks.map(len))
#return pd.DataFrame({'org_text': data[text_col],'alpha_toks': toks_alpha, 'quoted_toks' : quotes, 'quoted_lens' : quotes_lens})
#quotes = data[text_col].apply(lambda x: re.findall(quot_pat, x)).map(len).divide(toks_alpha.map(len)) # not finished. need to tokenize matches.
#quotes = sents_c.apply(lambda x: len([re.findall(quot_pat, s) for s in x]) / len(x))# per sentence - doesn't work.
# 6. Vocab richness/complexity
# 6.1 Type-token ratio.
tt = toks_alpha.apply(lambda x: len(set(x)) / len(x) if len(x) > 0 else 0.0) # could use Counter instead of set()
# 6.2.1 Hapax legomena
### Adapted from https://stackoverflow.com/a/1801676 ###
hlg = toks_alpha.apply(lambda x: len([word for word, count in Counter(map(str.lower, x)).items() if count == 1]) / len(x) if len(x) > 0 else 0.0) # could also lower with list comprehension.
### accessed 13-05-2021 ###
# 6.2.2 Hapax dislegomena (words that occur twice only)
hdlg = toks_alpha.apply(lambda x: len([word for word, count in Counter(map(str.lower, x)).items() if count == 2]) / len(x) if len(x) > 0 else 0.0)
# Here you would implement complexity measures
#- Brunet's W Measure
#- Yule's K Characteristic
#- Honore's R Measure
#- Sichel's S Measure
#- Simpson's Diversity Index
# 7. syllable frequencies #NOTE this is averaged/normalized syllable frequncies. NOTE the syllables docs suggest using cmudict for accuracy over speed.
sfr = toks_alpha.apply(lambda x: sum([syllables.estimate(w) for w in x]) / len(x) if len(x) > 0 else 0.0) # could also use statistics.mean for all of these averages..
# 8. Readability
# Flesch-Kincaid reading ease
fk = data[text_col].apply(lambda x: textstat.flesch_reading_ease(x))
# # 8.1 Automated Readability Index
# ari = data[text_col].swifter.apply(lambda x: textstat.automated_readability_index(x))
# r_ari = listify(ari, feature_name='automated_readability_index')
# # 8.2 Coleman-Liau index
# cli = data[text_col].swifter.apply(lambda x: textstat.coleman_liau_index(x))
# r_cli = listify(cli, feature_name='coleman_liau_index')
# # 8.3 Dale Chall Readability Index
# dci = data[text_col].swifter.apply(lambda x: textstat.dale_chall_readability_score(x))
# r_dci = listify(dci, feature_name='dale_chall_index')
# # 8.4 Gunning Fog Index
# gfi = data[text_col].swifter.apply(lambda x: textstat.gunning_fog(x))
# r_gfi = listify(gfi, feature_name='gunning_fog_index')
# 8.5 Consensus based on all tests in textstat.
# consensus = data[text_col].swifter.apply(lambda x: textstat.text_standard(x, float_output=True))
# r_consensus = listify(consensus, feature_name='readability_consensus_score')
# Could add basic sentiment with doc.token.sentiment?
# Store results TODO store each list of dicts in separate dict on the same level.
# wl_results = {
# {'length_features' : w_total, w_len_avg, asl_w, asl_c},
# {'specific_w_frequencies' : upper_ws, shortws, elongws, nums, past_freq, pres_freq, adj_positives, adj_cmp_sup ,ems},
# {'nonstandard_spelling' : s_check},
# {'words_in_quotes' : quot_ws},
# {'richess/complexity' : ttr, hlgs, hldgs},
# {'syllable frequencies' : syl_freq},
# {'readability' : r_fk, r_ari, r_cli, r_dci, r_gfi, r_consensus}
# }
# print('\nSTORING RESULTS')
# print('DONE')
if analyze == True:
w_total = [{'len_total_words': x[1]} for x in toks.map(len).items()]
w_len_avg = [{'avg_word_length' : x[1]} for x in awl.items()]
asl_w_avg = [{'avg_sent_len_words': x[1]} for x in asl_w.items()]
asl_c_avg = [{'avg_sent_len_chars' : x[1]} for x in asl_c.items()] # move this to character level.
upper_ws = [{'upper_words': x[1]} for x in uws.items()]
shortws = [{'short_words': x[1]} for x in sws.items()]
elongws = [{'elongated_words' : x[1]} for x in elws.items()]
nums = listify(nss, feature_name='numerical_tokens_frequency')
past_freq = listify(pst, feature_name = 'past_tense_frequency')
pres_freq = listify(prs, feature_name='present_tense_frequency')
adj_positives = listify(adj_pos, feature_name='positive_adjectives')
adj_cmp_sup = listify(adj_c_s, feature_name='comp_and_sup_adjectives')
ems = [{'emote_frequencies': x[1]} for x in e.items()]
s_check = [{'nonstandard_words': x[1]} for x in sc.items()]
quot_ws = listify(quotes, feature_name = 'quotes_in_words')
ttr = [{'type-token_ratio': x[1]} for x in tt.items()]
hlgs = listify(hlg, feature_name= 'hapax_legomena')
hdlgs = listify(hdlg, feature_name='hapax_dislegomena')
syl_freq = [{'avg_syllable_freq': x[1]} for x in sfr.items()]
r_flk = [{'flesch_kincaid_reading_ease' : x[1]} for x in fk.items()]
# Store results in dict.
wl_results['total_word_len'] = w_total
wl_results['avg_word_len'] = w_len_avg
wl_results['avg_sentence_len_words'] = asl_w_avg
wl_results['avg_sentence_len_chars'] = asl_c_avg
wl_results['uppercased_words'] = upper_ws
wl_results['short_words'] = shortws
wl_results['elongated_words'] = elongws
wl_results['numberlike_tokens'] = nums
wl_results['past_tense_words'] = past_freq
wl_results['present_tense_words'] = pres_freq
wl_results['positive_adjectives'] = adj_positives
wl_results['comp_and_sup_adjectives'] = adj_cmp_sup
wl_results['emotes'] = ems
wl_results['nonstandard_spelling'] = s_check # exclude?
wl_results['quoted_words'] = quot_ws
wl_results['type_token_ratio'] = ttr
wl_results['hapax_legomena'] = hlgs
wl_results['hapax_dislegomena'] = hdlgs
wl_results['syllable_freqs'] = syl_freq #takes too long?
wl_results['readability_flesch_kincaid'] = r_flk
# wl_results['readability_ari'] = r_ari
# wl_results['readability_coleman_liau'] = r_cli
# wl_results['readability_dale_chall'] = r_dci
# wl_results['readability_gunning_fog'] = r_gfi
#wl_results['readability_consensus'] = r_consensus
return wl_results
else:
# Build dataframe
wl_results_df = pd.DataFrame()
wl_results_df['total_word_len'] = w_total_series #✅
wl_results_df['avg_word_len'] = awl #✅
wl_results_df['avg_sentence_len_words'] = asl_w #✅
wl_results_df['avg_sentence_len_chars'] = asl_c #✅
wl_results_df['uppercased_words'] = uws #✅
wl_results_df['short_words'] = sws #✅
wl_results_df['elongated_words'] = elws #✅
wl_results_df['numberlike_tokens'] = nss #✅
wl_results_df['past_tense_words'] = pst #✅
wl_results_df['present_tense_words'] = prs #✅
wl_results_df['positive_adjectives'] = adj_pos #✅
wl_results_df['comp_and_sup_adjectives'] = adj_c_s #✅
wl_results_df['emotes'] = e #✅
wl_results_df['nonstandard_spelling'] = sc #✅
wl_results_df['quoted_words'] = quotes # ✅
wl_results_df['type_token_ratio'] = tt #✅
wl_results_df['hapax_legomena'] = hlg #✅
wl_results_df['hapax_dislegomena'] = hdlg #✅
wl_results_df['syllable_freqs'] = sfr #✅
wl_results_df['readability_flesch_kincaid'] = fk #✅
return wl_results_df
#return get_wl(data)#get_cl(data) , get_wl(data)
#%%
# Debugging
# test_df = train.iloc[:50, :]
# test = get_wl(test_df, analyze=False)
# %%
#%%
# Extract sentence-level features (syntactic)
def get_sl(data, text_col = 'text_clean',cv=None , train=False, analyze=False):
# load spacy model.
print('Loading spacy model')
nlp = spacy.load('en_core_web_sm')
nlp.enable_pipe("senter") #TODO Added senter to get_sl while passing on docs for speed.
# For POS tags, you could map a pos tag sequence/vector to the tweet.
# Initialize CountVectorizer for pos ngrams. Store pos tags in a separate column and transform with sklearn-pandas per column instead.
if train == True:
cv = CountVectorizer(analyzer='word', ngram_range=(1,3))
else:
cv = cv
# Retoknize the text
docs = data[text_col].swifter.apply(lambda x: nlp(x))
#toks = docs.apply(lambda x: [t.text for t in x]) # not used.
#return pd.DataFrame({'docs' : docs.map(len) , 'toks': toks.map(len)})
# Frequencies
# 1.1 frequencies of stop words (i.e. function words)
sts = docs.apply(lambda x: len([t.text for t in x if t.is_stop == True]) / len(x)) # normalized by all tokens (including numbers and punct.)
# 1.2 frequencies of punctuation
pnct = docs.apply(lambda x: len([t.text for t in x if t.is_punct == True]) / len(x))
# 1.3 Frequencies of roots (normalized by total number of words in tweet).
rts = docs.apply(lambda x: len([(t, t.dep_) for t in [t for t in x if t.is_space == False] if t.dep_ == 'ROOT']) / len(x)) # This still includes number-like tokens, punctuation and mentions, since these are relevant in the dependency trees. Normalization could account for whitespaces, but doesn't have to.
# 3. POS frequencies.
# Extract pos tags:count (use Counter)
pos = docs.apply(lambda x: [t.pos_ for t in x if t.text.isalnum() == True])
pos_freq = docs.apply(lambda x: {p:c/len([t for t in x if t.text.isalnum() == True]) for p, c in Counter([t.pos_ for t in x if t.text.isalnum() == True ]).items()}) # normalized by alphanumeric tokens (since punctuation frequencies are captured separately).
#pos_freq = [{k:v} for k, v in pfreq.items()]
#return pd.DataFrame({'text' : data[text_col] , 'tokens' : toks, 'pos' : pos})
# 4. POS ngrams (n=uni-bi-tri) - TODO move to ngrams
# join pos tags into strings for CountVectorizer -> return as special case. Do a type check in the lookup or vectorize function that just passes the matrix on. OR pass on POS strings to vectorize in the vectorize function?
#print('fit/transforming posgrams')
pgrams = pos.str.join(' ').to_list()
if train == True:
pgram_matrix = cv.fit_transform(pgrams)
#return cv, pgram_matrix
else:
pgram_matrix = cv.transform(pgrams)
# Sketch of countvectorizing pos ngrams.
#cv.fit_transform(test.str.join(sep=' ').to_list()) # This works. Consider how to get pos ngrams and still make them interpretable in the corpora - e.g. most frequent triplets? Does that even really tell you anything? You could use Counter or a pandas method to get the most frequent combination.
# {k:v for k, v in Counter(cv.get_feature_names()).items()}
# Note Counter has counter.most_common(n)
# Could use nltk.util.ngrams(sequence, n) as suggested here https://stackoverflow.com/questions/11763613/python-list-of-ngrams-with-frequencies
# 6. Sentiment?
# sentis = docs.apply(lambda x: sum([t.sentiment for t in x])) # doesn't work. needs training?
#return pd.DataFrame({'n_sents_spacy' : n_sents, 'n_sents_tstat' : n_sents_tstat})
if analyze == True:
# Store results.
stop_freq = listify(sts, feature_name='stopword_frequency')
punct_freq = listify(pnct, feature_name='punctuation_freq')
root_freq = listify(rts, feature_name='root_frequencies')
syn_results = {'stopword_freq': stop_freq,
'syn_punc_freq' : punct_freq,
'root_freq': root_freq,
'pos_freq' : list(pos_freq),
'pos_ngrams' : pgram_matrix}
return cv, syn_results
else:
syn_results_df = pd.DataFrame()
syn_results_df['stopword_freq'] = sts
syn_results_df['syn_punc_freq'] = pnct
syn_results_df['root_freq'] = rts
#syn_results_df['pos_freq'] = list(pos_freq)
#syn_results_df['pos_ngrams'] = pgram_matrix
return docs, cv, pgram_matrix, syn_results_df
# To call on test data, remember to call it on the cv returning after calling it on the training data - call it 'train_cv' in model.py
#%%
# Debugging
# test_df = train.iloc[:50,:]
# test = get_sl(test_df, train=True, analyze=True)
#%% ANCHOR testing get_syn
# extract_feats(test_df, analyze=True, train=True)
# NOTE when extracting in model.py, call twice instead of once.
#train.columns.get_loc('text_clean')
# test_df = train.iloc[:50, :] # versus list version: train_text[:20]
# test = get_syn(test_df)
# # val_test = get_lexical(train_text[:5])
#%%
#%%
# Extract document-level features (structural)
def get_dl(data, text_col='text_clean', analyze=True, docs=[]):
# 1. Number of sentences
if len(docs) <= 0:
print('Configuring spacy model for document level')
nlp = spacy.load('en_core_web_sm', disable=['lemmatizer', 'parser','tagger','ner'])
nlp.enable_pipe('senter') # this is the main diff between wl, sl and dl.
docs = data[text_col].swifter.apply(lambda x: nlp(x))
ns = docs.apply(lambda x: len([s for s in x.sents])) #en_web_sm is not as accurate as blank or textstat.
# ns = data[text_col].apply(
# lambda x: textstat.sentence_count(x))
# 2. Number of user mentions - absolute counts.
ms = data[text_col].str.count('@user', flags=re.I|re.M)
# Could be expanded to include hashtags and urls in the future here.
if analyze == True:
n_sents = listify(ns, feature_name = 'number_of_sentences')
ments = listify(ms, feature_name = 'number_of_mentions')
struc_results = {'n_sents': n_sents, 'n_mentions': ments} # before skiping listify.
#struc_results = {'n_sents' : ns, 'n_mentions' : ms}
return struc_results
else:
struc_results_df = pd.DataFrame()
struc_results_df['n_sents'] = ns #✅
struc_results_df['n_mentions'] = ms #✅
return struc_results_df
#%%
# Testing get_struc.
#test = get_dl(test_df, analyze=False)
#%%
# ANCHOR function to lookup and get specific [{features: x.x}] from extraction funct.
def feature_lookup(f_param_dict, extracted_features):
feature_name1 = [{'feature_name' : 0.0}]
for var in locals():
if var in f_param_dict['some_feature_cat1']:
return locals()[var]
# also look into dpath, dict-toolbox2
#%%
# Test feature_lookup
# t = {'some_feature_cat1': ['feature_name1', 'feature_name2']}
# feature_lookup(t)
#%%
def conc_features(matrixList):
# Concatenate feature vectors
# pass a list or dict of matrices and do list/dict comprehension/unpacking?
#combined_features = hstack([feature_vector1, feature_vector2], 'csr')
combined_features = hstack(matrixList, 'csr')
return combined_features
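# Hedged sketch of conc_features(): horizontally stacks two toy csr matrices;
# in practice these would be e.g. the pos-ngram matrix and the DictVectorizer
# output for the same rows (the shapes here are made up).
def _example_conc_features():
    a = csr_matrix(np.array([[1, 0], [0, 2]]))
    b = csr_matrix(np.array([[3], [4]]))
    return conc_features([a, b])  # -> 2x3 sparse csr matrix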
#%%
def d_vectorize(selected_feats, train=False, dv=None):
# Old approach: Vectorize all generated lists of dicts (stored in a dict or list?).
# if train == True:
# dv = DictVectorizer()
# #X = d.fit_transform(dictList)
# # Either store as list.
# dvList = []
# matList = []
# # Or in single dict
# #matDict = dict() using dv as a key just overwrites the value since they are all identical. Nesting the dict just complicates things even more...
# if train == True:
# # Iterate through feature lists of dictionaries (lexical, syntactic, structural)
# for feature_name, feat_list in selected_feats.items():
# #print(feature_name, feat_list)
# #return
# if feature_name == 'pos_ngrams': # Check for pos_ngrams (already vectorized)
# matList.append(feat_list) # if pos_ngrams feat matrix, just append it.
# #matDict[dv] = feat_list
# continue
# if train == True:
# feat_matrix = dv.fit_transform(feat_list)
# # NOTE storing each vectorizer
# dvList.append(dv)
# matList.append(feat_matrix)
# # This is the test case
# # The test case. transforming test data to fitted individual dvs.
# if train == False: #iterate through each dv and all the feature lists.
# feat_lists = []
# # this has to only fit once per feature dv-featurelist pair.
# for feature_name, feat_list in selected_feats.items():
# if feature_name == 'pos_ngrams':
# matList.append(feat_list)
# continue
# feat_lists.append(feat_list)
# #return(feat_lists)
# for dv, featList in list(zip(dvs, feat_lists)): # enable this to loop through both dvs and features.
# #print(dv, featList)
# feat_matrix = dv.transform(featList) # this needs to be passed its corresponding dv. if you store in zip/list, it should have the same, fixed order. but how to iterate?
# matList.append(feat_matrix)
# #matDict[dv] = feat_matrix
# # Is LIWC a separate case? Should be the same as engineered features.
# #return matDict#dv, matList #matDict.values() should be list of matrices equal to number of features. To be concatenated.
# return dvList, matList
# New approach - using dfs with selected features.
# 1. Get list of dicts, row-wise from selected features DF.
feats = selected_feats.to_dict('records')
if train == True:
dv = DictVectorizer()
feats_vecs = dv.fit_transform(feats)
return dv , feats_vecs
else:
feats_vecs = dv.transform(feats)
return dv, feats_vecs
#%%
####
# test_df = train.iloc[:50,:]
# sent_cv_train, extracted_train = extract_feats(test_df, text_col='text_clean', analyze=False, train=True, feature_pms=feature_params)
# sent_cv_test, extracted_test = extract_feats(val.iloc[:50,:], text_col='text_clean', analyze=False, train=False, cv=sent_cv_train, feature_pms=feature_params)
# train_dv, train_vecs = d_vectorize(train_selected_feats_df, train=True)
# test_dv, test_vecs = d_vectorize(test_selected_feats_df, train=False, dv=train_dv)
####
#test = d_vectorize(extracted_test, train=False, dvs=train_dvs)
# Then d_vectorize LIWC matches.
# Then concat all of the vectorized features.
# Then fit model!
#%%
def extract_feats(data, text_col='text_clean', feature_pms=dict(), analyze=False, cv=None, train=False):
# Data = dataframe - can be recast by child functions.
# See if resetting data index speeds up extraction.
data.reset_index(drop=True, inplace=True)
# lowercase all @USER mentions. An artifact from preprocessing.
data[text_col] = data[text_col].str.replace(
'@USER', '@user') # , inplace=True)
all_features_dict = dict()
all_features_df_list = []
selected_features = dict()
# 1. Call each of the extractor functions
# 1.3 Sentence-level # TODO moved up to pass docs to other extraction functs for speed.
print('Sentence level features')
if analyze == True:
docs = []
sent_cv, sent_lvl = get_sl(
data, text_col=text_col, cv=cv, analyze=analyze, train=train)
else:
docs, sent_cv, pgram_matrix, sent_lvl = get_sl(data, text_col=text_col, cv=cv, analyze=analyze, train=train)
# 1.1 Character-level (10 features)
print('Character level features')
char_lvl = get_cl(data, text_col=text_col, analyze=analyze)
# 1.2 Word-level
print('Word level features')
word_lvl = get_wl(data, text_col=text_col, analyze=analyze, docs=docs)
#sent_lvl = word_lvl.copy(deep=True)
#return sent_lvl
# if train == False:
# sent_cv, sent_lvl = get_sl(data, text_col=text_col, analyze=analyze)
# 1.4 Document-level
print('Document level features')
doc_lvl = get_dl(data, text_col=text_col, analyze=analyze, docs=docs)
#return doc_lvl
# Return all features if extracting for feature analysis. LIWC is analyzed separately.
if analyze == True:
# Store in dict
all_features_dict['character_level'] = char_lvl
all_features_dict['word_level'] = word_lvl
all_features_dict['sentence_level'] = sent_lvl # Maybe pop pgrams matrix into separate var/container?
all_features_dict['document_level'] = doc_lvl
return sent_cv, all_features_dict # pass sent_cv on to analyze_feats from here.
# Old approaches
# Option 1 - extracting flat list (of n instances) (of dicts with n features) to vectorize in one go.
# for feat_cat, feature_name in feature_pms['engineered'].items():
# if feat_cat in all_features.keys():
# selected_features[feat_cat] = all_features[feat_cat].values()
# return selected_features
# TODO how to make sure that all features align? Pandas? hstack before fitting?
# Option 2 - extract individual lists of [{'feature1' : feature_value}... {'feature2' : feature_value}] for each feauture?
# Iterate through features to pass on, given parameters in parameter dict.
# Get a flat list of all desired target features.
#target_feats = list(itertools.chain.from_iterable([fn for fn in feature_pms['engineered'].values()]))
# Lookup and retrieve each feature from all_features and store in selected_features
# Works, but return that awkward df with individual dicts.
# for feat_level, feat_name in all_features.items():# outer level {'feature_level': 'feature_name': [{'feature' : feature_val}]}
# for fn, fl in feat_name.items():
# if fn in target_feats:
# selected_features[fn] = fl
# Return selected features
# 2. return selectively for classification
if analyze == False:
# Get a flat list of all desired target features.
target_feats = list(itertools.chain.from_iterable([fn for fn in feature_pms['engineered'].values()]))
#return char_lvl, word_lvl, sent_lvl, doc_lvl
# Concatenate feature dfs for each level horizontally.
#all_feats_df = pd.concat([char_lvl, word_lvl, sent_lvl, doc_lvl], axis=1, join='inner') # works.
all_feats_df_list = [char_lvl, word_lvl, sent_lvl, doc_lvl]
# Mitigating duplicate indices in dfs.
[df.reset_index(inplace=True, drop=True) for df in all_feats_df_list]
# 1.5 LIWC features
# parsed_liwc is called in the main namespace.
if feature_pms['liwc'] == True:
liwc_feats = pd.DataFrame.from_records(
liwc_match(parsed_liwc, data, extract=True))
#selected_features['liwc_counts'] = liwc_feats # store LIWC straight in selected_feats dict.
# index liwc_feats with data.index
liwc_feats.set_index(data.index, inplace=True)
all_feats_df_list.append(liwc_feats)
#return liwc_feats
#return sent_cv, all_features
# concat liwc features to df selected features.
# Concat all feature dfs.
#try:
all_feats_df = pd.concat(all_feats_df_list, axis=1, join='inner')
#print(all_feats_df)
#except:
# return all_feats_df_list# , [len(df) for df in all_feats_df_list]
# Select columns from all features df unless they are pos_ngrams. could add pos_freqs here.
# return all_feats_df 35+64=99 feats.
selected_feats_df = all_feats_df[[fn for fn in target_feats if fn != 'pos_ngrams']]
#return all_feats_df, target_feats
return sent_cv, pgram_matrix, selected_feats_df
#%% ANCHOR procedure for feature extraction.
# test_df = train.iloc[:50,:]
# #sent_cv, train_feats_df = extract_feats(test_df, feature_pms = feature_params, analyze=False, train=True)
# # Parse LIWC
# parsed_liwc = parse_liwc('../../../Data/LIWC2007dictionary poster.xls', text_col=text_col)
# # This is just a test of extraction with liwc.
# liwc_test = extract_feats(test_df, feature_pms = feature_params, analyze=False, train=True)
# # Dict_vectorize-fit_transform train.
# train_en_feat_vec = d_vectorize(train_selected_feats_df, train=True)
# # Combine feature matrices: # also use ngrams in model.py.
# train_feats_combined = conc_feat([train_pgram_matrix , train_en_feat_vec])
# # Extract test_feats
# sent_cv, test_pgram_matrix, test_selected_feats_df = extract_feats(val.iloc[:50,], feature_pms= feature_params, analyze=False, train=False, cv=sent_cv)
# # Dict_vectorize-transform test with train_dv.
# test_en_feat_vec = d_vectorize(test_selected_feats_df, train=False)
# -> concat pgram matrices and each selected feature df after dictvectorizing them.
####
#analysis = analyze_feats(train_feats_dict) # analysis case
#feats_for_vec = extract_feats(test_df, feature_pms=feature_params, analyze=False, train=True) # the train case
# test = extract_feats(test_df, analyze=True, cv=train_cv, train=False) # test case
#%%
# analyze features TODO move to data_exploration
def analyze_feats(featuresDict, resultpath='./exploring/feature_analysis/', cv=None):
# This function is called on the complete output of all the extract functions.
# Put all extracted features into a single dict. You then call vectorize and concat on that based on lookup either manual or via function.
# LIWC is handled separately..
# 0. Append all lists of dicts (dictLists) to one flat list.
featList = []
posfreqList = []
pgrams = None
# Smarter solution : calculate stats directly on dict values in lists of dicts.
statsDict = dict()
#Loop through top level featDict
for feat_level, feat_name in featuresDict.items():
#featList.append(pd.DataFrame(feat_name))
#print(feat_name.keys())
#Second level - individual feature names : ['feature' : int/flaot].
for feat, feat_value in feat_name.items():
#print( feat, type(feat_value))
# store pos features seperately.
if feat == 'pos_freq':
posfreqList.append(pd.DataFrame(feat_value))
continue
if feat == 'pos_ngrams':
pgrams = feat_value
continue
featList.append(pd.DataFrame(feat_value))
# Concat lists of extracted feature dataframes.
featDF = pd.concat(featList, axis=1)
#featDF = pd.DataFrame.from_records(featList)
posfreqDF = pd.concat(posfreqList) #
#return posfreqDF.mean().to_dict()
#return featDF
#return featDF, posfreqDF
# Split features into binary and frequency-based
# Get series of bools columnwise where any value is not a float and greater than 1.
filter_cols = featDF.select_dtypes(exclude=float).gt(1).any(0)
# Filter the features DF based on a list of the above indices (i.e. column names) from the series above.
#binDF = featDF.select_dtypes(exclude = float).between(0, 1)
binDF = featDF.loc[ : , filter_cols[filter_cols == False].index.tolist()]
absoDF = featDF.loc[ : , filter_cols[filter_cols == True].index.tolist()] # absolute counts
freqDF = featDF.select_dtypes(float)
#return binDF #, absoDF, freqDF
# Multindex
# reformDict = {}
# for outerKey, innerDict in featuresDict.items():
# for innerKey, values in innerDict.items():
# reformDict[(outerKey,
# innerKey)] = [value for value in]
# return reformDict
#return binDF, absoDF , freqDF, posfreqDF
# Write all the dfs to spreadsheet for easier visualization.
writer = pd.ExcelWriter(resultpath, engine='xlsxwriter')
binDF.to_excel(writer, sheet_name="binary_features")
absoDF.to_excel(writer, sheet_name='absolute_features')
freqDF.to_excel(writer, sheet_name="frequency_features")
posfreqDF.to_excel(writer, sheet_name="pos_frequencies")
#writer.save()
# Get basic stats for each category of feature values.
# Store results (binary, absolute and freq)
# binary features - percentages of positives.
bin_pcts = (binDF.sum().divide(len(binDF)) * 100)
# Store in dict
statsDict['binary_features (% positive)'] = bin_pcts.to_dict()
# Write to sheet.
bin_pcts.to_excel(writer, sheet_name = 'binary_percentages')
# absolute count features - sum totals.
abso_totals = absoDF.sum() # TODO change this to averages (e.g. average number of sentences etc..)
statsDict['absolute_features (sum total)'] = abso_totals.to_dict()
abso_totals.to_excel(writer, sheet_name = 'absolute_totals')
# frequency features - means.
freq_means = freqDF.mean() #.round(3)
statsDict['frequency_features (average)'] = freq_means.to_dict() # mean of the normalized frequencies (rounded to 3 decimal points) - EXCLUDING POS frequencies..
freq_means.to_excel(writer, sheet_name = 'frequencies_mean')
# POS frequencies
posfreq_means = posfreqDF.mean() #.round(3)
statsDict['pos_frequncies'] = posfreq_means.to_dict()
posfreq_means.to_excel(writer, sheet_name = 'posfreq_means')
# Analyzing ngrams
### Adapted from this example https://gist.github.com/xiaoyu7016/73a2836298cfaef8212fd20a94736d56 ###
# # Here you just store the pgrams in a df.
pgram_freqs = pd.DataFrame(pgrams.sum(axis=0).T,
index=cv.get_feature_names(), columns=['freq']).sort_values(by='freq', ascending=False)#.max(20).plot(kind='bar',title='posgrams')
### Accessed 19-05-2021 ###
# Write pgrams to sheet in spreadsheet for inspection.
pgram_freqs.to_excel(writer, sheet_name='pos_ngram_frequencies')
# Save excel file.
writer.save()
# Return the results dictionary.
return statsDict
#%%
#%% ANCHOR combining / concatenating features.
#draft of how to use DictVectorizer on LIWC
# Example of vectorizing dictionary counts.
# dv = DictVectorizer(sparse=False) or sparse=True which yields sparse.csr.
# D = [{'cat1' : 3, 'cat2': 0 ...} {'cat1': 0, 'cat2': 55}...]
# X_train = dv.fit_transform(D) <- where D is the result of running the extract=True function on the training data.
# This is where you would normalize either the dense numpy.ndarray or sparse.csr:
# transformer = Normalizer().fit(X)
# X_train = transformer.transform(X_train)
# X_test = dv.fit(X_test) etc..
# Concatenate X (feature) arrays?
### https://stackoverflow.com/a/22710579 ###
# - "Don't forget to normalize this with sklearn.preprocessing.Normalizer, and be aware that even after normalization, those text_length features are bound to dominate the other features in terms of scale"
### Accessed 28-04-2021 ###
# Watch out for nan's in the resulting feature array?
# https://stackoverflow.com/q/39437687 reports on this as a side-effect of using DictVectorizer.
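# Minimal runnable sketch of the draft above, assuming toy LIWC-style count dicts
# (category names and counts are made up for illustration).
def _example_dictvectorize_counts():
    D_train = [{'cat1': 3, 'cat2': 0}, {'cat1': 0, 'cat2': 55}]
    D_test = [{'cat1': 1, 'cat2': 2}]
    dv = DictVectorizer(sparse=True)      # sparse=True yields a scipy csr matrix
    X_train = dv.fit_transform(D_train)   # fit on the training dicts only
    X_test = dv.transform(D_test)         # transform (do not refit) the test dicts
    norm = Normalizer().fit(X_train)      # row-wise L2 normalization to tame scale differences
    return norm.transform(X_train), norm.transform(X_test)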
#%%
# NOTE You probably want to wrap all of these in one extract_features function...
#%%
#%%
if __name__ == "__main__":
#%%
# ANCHOR Read some data just to develop with to test with.
train =
|
pd.read_pickle('../../../Data/train/train.pkl')
|
pandas.read_pickle
|
# Library Imports
from joblib import load
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import MinMaxScaler
import streamlit as st
import _pickle as pickle
from random import sample
from PIL import Image
from scipy.stats import halfnorm
# Loading the Profiles
with open("refined_profiles.pkl",'rb') as fp:
df = pickle.load(fp)
with open("refined_cluster.pkl", 'rb') as fp:
cluster_df = pickle.load(fp)
with open("vectorized_refined.pkl", 'rb') as fp:
vect_df = pickle.load(fp)
# Loading the Classification Model
model = load("refined_model.joblib")
## Helper Functions
def string_convert(x):
"""
First converts the lists in the DF into strings
"""
if isinstance(x, list):
return ' '.join(x)
else:
return x
def vectorization(df, columns, input_df):
"""
Using recursion, iterate through the df until all the categories have been vectorized
"""
column_name = columns[0]
# Checking if the column name has been removed already
if column_name not in ['Bios', 'Movies','Religion', 'Music', 'Politics', 'Social Media', 'Sports']:
return df, input_df
# Encoding columns with respective values
if column_name in ['Religion', 'Politics']:
# Getting labels for the original df
df[column_name.lower()] = df[column_name].cat.codes
# Dictionary for the codes
d = dict(enumerate(df[column_name].cat.categories))
d = {v: k for k, v in d.items()}
# Getting labels for the input_df
input_df[column_name.lower()] = d[input_df[column_name].iloc[0]]
# Dropping the column names
input_df = input_df.drop(column_name, 1)
df = df.drop(column_name, 1)
return vectorization(df, df.columns, input_df)
# Vectorizing the other columns
else:
# Instantiating the Vectorizer
vectorizer = CountVectorizer()
# Fitting the vectorizer to the columns
x = vectorizer.fit_transform(df[column_name].values.astype('U'))
y = vectorizer.transform(input_df[column_name].values.astype('U'))
# Creating a new DF that contains the vectorized words
df_wrds = pd.DataFrame(x.toarray(), columns=vectorizer.get_feature_names())
y_wrds = pd.DataFrame(y.toarray(), columns=vectorizer.get_feature_names(), index=input_df.index)
# Concating the words DF with the original DF
new_df = pd.concat([df, df_wrds], axis=1)
y_df = pd.concat([input_df, y_wrds], 1)
# Dropping the column because it is no longer needed in place of vectorization
new_df = new_df.drop(column_name, axis=1)
y_df = y_df.drop(column_name, 1)
return vectorization(new_df, new_df.columns, y_df)
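# Hedged usage note (hypothetical call): the recursion drops one categorical
# column per pass until none of the listed columns remain, e.g.
#   vect_all_df, vect_new_df = vectorization(df, df.columns, new_profile_df)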
def scaling(df, input_df):
"""
Scales the new data with the scaler fitted from the previous data
"""
scaler = MinMaxScaler()
scaler.fit(df)
input_vect = pd.DataFrame(scaler.transform(input_df), index=input_df.index, columns=input_df.columns)
return input_vect
def top_ten(cluster, vect_df, input_vect):
"""
Returns the DataFrame containing the most similar profiles to the new data (the slice below keeps the top 3)
"""
des_cluster = vect_df[vect_df['Cluster #']==cluster[0]].drop('Cluster #', 1)
des_cluster = des_cluster.append(input_vect, sort=False)
user_n = input_vect.index[0]
corr = des_cluster.T.corrwith(des_cluster.loc[user_n])
top_10_sim = corr.sort_values(ascending=False)[1:4]
top_10 = df.loc[top_10_sim.index]
top_10[top_10.columns[1:]] = top_10[top_10.columns[1:]]
return top_10.astype('object')
def example_bios():
"""
Creates a list of random example bios from the original dataset
"""
st.write("-"*100)
st.text("Some example Bios:\n(Try to follow the same format)")
for i in sample(list(df.index), 3):
st.text(df['Bios'].loc[i])
st.write("-"*100)
# Creating a List for each Category
p = {}
movies = ['Adventure',
'Action',
'Drama',
'Comedy',
'Thriller',
'Horror',
'RomCom',
'Musical',
'Documentary']
p['Movies'] = [0.28,
0.21,
0.16,
0.14,
0.09,
0.06,
0.04,
0.01,
0.01]
tv = ['Comedy',
'Drama',
'Action/Adventure',
'Suspense/Thriller',
'Documentaries',
'Crime/Mystery',
'News',
'SciFi',
'History']
p['TV'] = [0.30,
0.23,
0.12,
0.12,
0.09,
0.08,
0.03,
0.02,
0.01]
religion = ['Catholic',
'Christian',
'Jewish',
'Muslim',
'Hindu',
'Buddhist',
'Spiritual',
'Other',
'Agnostic',
'Atheist']
p['Religion'] = [0.16,
0.16,
0.01,
0.19,
0.11,
0.05,
0.10,
0.09,
0.07,
0.06]
music = ['Rock',
'HipHop',
'Pop',
'Country',
'Latin',
'EDM',
'Gospel',
'Jazz',
'Classical']
p['Music'] = [0.30,
0.23,
0.20,
0.10,
0.06,
0.04,
0.03,
0.02,
0.02]
sports = ['Football',
'Baseball',
'Basketball',
'Hockey',
'Soccer',
'Other']
p['Sports'] = [0.34,
0.30,
0.16,
0.13,
0.04,
0.03]
politics = ['Liberal',
'Progressive',
'Centrist',
'Moderate',
'Conservative']
p['Politics'] = [0.26,
0.11,
0.11,
0.15,
0.37]
social = ['Facebook',
'Youtube',
'Twitter',
'Reddit',
'Instagram',
'Pinterest',
'LinkedIn',
'SnapChat',
'TikTok']
p['Social Media'] = [0.36,
0.27,
0.11,
0.09,
0.05,
0.03,
0.03,
0.03,
0.03]
age = None
# Lists of Names and the list of the lists
categories = [movies, religion, music, politics, social, sports, age]
names = ['Movies','Religion', 'Music', 'Politics', 'Social Media', 'Sports', 'Age']
combined = dict(zip(names, categories))
## Interactive Section
st.title("Machine Learning Model for Dating App Demo for AppSynergy")
st.header("Finding a Partner with AI Using NaiveBayes, KNN and SVM")
st.write("Use Machine Learning to Find the Top 3 Dating Profile Matches")
image = Image.open('roshan_graffiti.png')
st.image(image, use_column_width=True)
new_profile =
|
pd.DataFrame(columns=df.columns, index=[df.index[-1]+1])
|
pandas.DataFrame
|
'''This file holds all relevant functions necessary for starting the data analysis.
An object class for all account data is established, which will hold the raw data after import,
the processed data and all subdata configuration necessary for plotting.
The account data is provided through the account identification process in account_ident.py
Necessary functions for holiday extraction, roundies calculation, merging and cashbook linkage are provided in the Accounts class.
An Excel file is exported at the end.'''
import datetime
import locale
import os
import platform
import numpy as np
import pandas as pd
from basefunctions import account_ident
if platform.system() == 'Windows':
locale.setlocale(locale.LC_ALL, 'German')
FOLDER_SEP = '\\'
elif platform.system() == 'Darwin':
locale.setlocale(locale.LC_ALL, 'de_DE.utf-8')
FOLDER_SEP = '/'
else:
locale.setlocale(locale.LC_ALL, 'de_DE.utf8')
FOLDER_SEP = '/'
#_______________________________________ read in longterm data for training machine learning algorithm _______________
def longtermdata_import(path, decrypt_success):
if decrypt_success:
longterm_data = pd.read_csv(path, sep=';', parse_dates=[0, 1])
else:
empty_dataframe = {'time1':np.datetime64, 'time2':np.datetime64, 'act':str, 'text':str, 'val':float, 'month':str, 'cat':str, 'main cat':str, 'acc_name':str}
longterm_data = pd.DataFrame(columns=empty_dataframe.keys()).astype(empty_dataframe)
#extract saved account names in longterm_data
saved_accnames = list(longterm_data['acc_name'].unique())
saved_dataframe = {} #stored dataframes from import
for account_name in saved_accnames: #iterate through the saved account names
saved_dataframe[account_name] = longterm_data.loc[longterm_data['acc_name'] == account_name] #get saved dataframes
return saved_dataframe
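def _reexport_longterm(path):
    """A minimal round-trip sketch (an assumption, not called anywhere in this
    module): re-read the saved per-account dataframes and write them back out
    unchanged, e.g. to rewrite the long-term CSV in place."""
    saved = longtermdata_import(path, decrypt_success=True)
    longterm_export(path, saved)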
def longterm_export(path, saved_dataframe): # needs to be outside the class in case the program is closed before data integration
longterm_data = pd.DataFrame(columns=['time1', 'time2', 'act', 'text', 'val', 'month', 'cat', 'main cat', 'acc_name'])
for account_name in saved_dataframe.keys():
account_name_concat = saved_dataframe[account_name]
account_name_concat['acc_name'] = account_name #set account name in dataframe to be saved
longterm_data =
|
pd.concat([longterm_data, account_name_concat])
|
pandas.concat
|
#!/usr/bin/python3
# RNANet statistics
# Developed by <NAME> & <NAME>, 2021
# This file computes additional geometric measures over the produced dataset,
# and estimates their distributions through Gaussian mixture models.
# THIS FILE IS NOT SUPPOSED TO BE RUN DIRECTLY.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as st
import Bio, glob, json, os, random, sqlite3, warnings
from Bio.PDB.MMCIFParser import MMCIFParser
from Bio.PDB.vectors import Vector, calc_angle, calc_dihedral
from multiprocessing import Pool, Value
from pandas.core.common import SettingWithCopyWarning
from setproctitle import setproctitle
from sklearn.mixture import GaussianMixture
from tqdm import tqdm
from RNAnet import init_with_tqdm, trace_unhandled_exceptions, warn, notify
runDir = os.getcwd()
# This dic stores the number laws to use in the GMM to estimate each parameter's distribution.
# If you do not want to trust this data, you can use the --rescan-nmodes option.
# GMMs will be trained between 1 and 8 modes and the best model will be kept.
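# A minimal sketch (not part of the original pipeline) of the "--rescan-nmodes"
# idea mentioned above: fit GaussianMixture models with 1 to 8 components on a
# 1-D array of measures and keep the one with the lowest BIC. The function name
# and its arguments are illustrative assumptions, not RNANet code.
def select_best_n_modes(measures, max_modes=8):
    X = np.array(measures, dtype=float).reshape(-1, 1)
    best_gmm, best_bic = None, np.inf
    for n in range(1, max_modes + 1):
        gmm = GaussianMixture(n_components=n, n_init=3, random_state=0).fit(X)
        bic = gmm.bic(X)
        if bic < best_bic:
            best_gmm, best_bic = gmm, bic
    return best_gmm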
modes_data = {
# bonded distances, all-atom, common to all. Some are also used for HiRE-RNA.
"C1'-C2'":3, "C2'-C3'":2, "C2'-O2'":2, "C3'-O3'":2, "C4'-C3'":2, "C4'-O4'":2, "C5'-C4'":2, "O3'-P":3, "O4'-C1'":3, "O5'-C5'":3, "P-O5'":3, "P-OP1":2, "P-OP2":2,
# bonded distances, all-atom, purines
"C4-C5":3, "C4-N9":2, "N3-C4":2, "C2-N3":2, "C2-N2":5, "N1-C2":3, "C6-N1":3, "C6-N6":3, "C6-O6":3, "C5-C6":2, "N7-C5":3, "C8-N7":2, "N9-C8":4, "C1'-N9":2,
# bonded distances, all-atom, pyrimidines
"C4-O4":2, "C4-N4":2, "C2-N1":1, "C2-O2":3, "N3-C2":4, "C4-N3":4, "C5-C4":2, "C6-C5":3, "N1-C6":2, "C1'-N1":2,
# torsions, all atom
"Alpha":3, "Beta":2, "Delta":2, "Epsilon":2, "Gamma":3, "Xhi":3, "Zeta":3,
# Pyle, distances
"C1'-P":3, "C4'-P":3, "P-C1'":3, "P-C4'":3,
# Pyle, angles
"C1'-P°-C1'°":3, "P-C1'-P°":2,
# Pyle, torsions
"Eta":1, "Theta":1, "Eta'":1, "Theta'":1, "Eta''":4, "Theta''":3,
# HiRE-RNA, distances
"C4'-P":3, "C4'-C1'":3, "C1'-B1":3, "B1-B2":2,
# HiRE-RNA, angles
"P-O5'-C5'":2, "O5'-C5'-C4'":1, "C5'-C4'-P":2, "C5'-C4'-C1'":2, "C4'-P-O5'":2, "C4'-C1'-B1":2, "C1'-C4'-P":2, "C1'-B1-B2":2,
# HiRE-RNA, torsions
"P-O5'-C5'-C4'":3, "O5'-C5'-C4'-P°":3, "O5'-C5'-C4'-C1'":3, "C5'-C4'-P°-O5'°":3, "C5'-C4'-C1'-B1":2, "C4'-P°-O5'°-C5'°":3, "C4'-C1'-B1-B2":3, "C1'-C4'-P°-O5'°":3,
# HiRE-RNA, basepairs
"cWW_AA_tips_distance":3, "cWW_AA_C1'-B1-B1pair":1, "cWW_AA_B1-B1pair-C1'pair":1, "cWW_AA_C4'-C1'-B1-B1pair":2, "cWW_AA_B1-B1pair-C1'pair-C4'pair":3, "cWW_AA_alpha_1":2, "cWW_AA_alpha_2":3, "cWW_AA_dB1":3, "cWW_AA_dB2":3,
"tWW_AA_tips_distance":1, "tWW_AA_C1'-B1-B1pair":1, "tWW_AA_B1-B1pair-C1'pair":1, "tWW_AA_C4'-C1'-B1-B1pair":2, "tWW_AA_B1-B1pair-C1'pair-C4'pair":3, "tWW_AA_alpha_1":2, "tWW_AA_alpha_2":1, "tWW_AA_dB1":1, "tWW_AA_dB2":2,
"cWH_AA_tips_distance":3, "cWH_AA_C1'-B1-B1pair":2, "cWH_AA_B1-B1pair-C1'pair":2, "cWH_AA_C4'-C1'-B1-B1pair":2, "cWH_AA_B1-B1pair-C1'pair-C4'pair":2, "cWH_AA_alpha_1":1, "cWH_AA_alpha_2":2, "cWH_AA_dB1":3, "cWH_AA_dB2":2,
"tWH_AA_tips_distance":3, "tWH_AA_C1'-B1-B1pair":1, "tWH_AA_B1-B1pair-C1'pair":3, "tWH_AA_C4'-C1'-B1-B1pair":2, "tWH_AA_B1-B1pair-C1'pair-C4'pair":2, "tWH_AA_alpha_1":1, "tWH_AA_alpha_2":3, "tWH_AA_dB1":2, "tWH_AA_dB2":1,
"cHW_AA_tips_distance":1, "cHW_AA_C1'-B1-B1pair":2, "cHW_AA_B1-B1pair-C1'pair":2, "cHW_AA_C4'-C1'-B1-B1pair":3, "cHW_AA_B1-B1pair-C1'pair-C4'pair":2, "cHW_AA_alpha_1":2, "cHW_AA_alpha_2":2, "cHW_AA_dB1":3, "cHW_AA_dB2":2,
"tHW_AA_tips_distance":4, "tHW_AA_C1'-B1-B1pair":2, "tHW_AA_B1-B1pair-C1'pair":2, "tHW_AA_C4'-C1'-B1-B1pair":2, "tHW_AA_B1-B1pair-C1'pair-C4'pair":2, "tHW_AA_alpha_1":2, "tHW_AA_alpha_2":1, "tHW_AA_dB1":2, "tHW_AA_dB2":1,
"cWS_AA_tips_distance":2, "cWS_AA_C1'-B1-B1pair":2, "cWS_AA_B1-B1pair-C1'pair":2, "cWS_AA_C4'-C1'-B1-B1pair":2, "cWS_AA_B1-B1pair-C1'pair-C4'pair":1, "cWS_AA_alpha_1":2, "cWS_AA_alpha_2":2, "cWS_AA_dB1":2, "cWS_AA_dB2":1,
"tWS_AA_tips_distance":2, "tWS_AA_C1'-B1-B1pair":2, "tWS_AA_B1-B1pair-C1'pair":2, "tWS_AA_C4'-C1'-B1-B1pair":3, "tWS_AA_B1-B1pair-C1'pair-C4'pair":1, "tWS_AA_alpha_1":2, "tWS_AA_alpha_2":2, "tWS_AA_dB1":2, "tWS_AA_dB2":3,
"cSW_AA_tips_distance":3, "cSW_AA_C1'-B1-B1pair":3, "cSW_AA_B1-B1pair-C1'pair":2, "cSW_AA_C4'-C1'-B1-B1pair":1, "cSW_AA_B1-B1pair-C1'pair-C4'pair":2, "cSW_AA_alpha_1":2, "cSW_AA_alpha_2":2, "cSW_AA_dB1":1, "cSW_AA_dB2":1,
"tSW_AA_tips_distance":3, "tSW_AA_C1'-B1-B1pair":3, "tSW_AA_B1-B1pair-C1'pair":3, "tSW_AA_C4'-C1'-B1-B1pair":2, "tSW_AA_B1-B1pair-C1'pair-C4'pair":2, "tSW_AA_alpha_1":2, "tSW_AA_alpha_2":2, "tSW_AA_dB1":2, "tSW_AA_dB2":2,
"cHH_AA_tips_distance":4, "cHH_AA_C1'-B1-B1pair":2, "cHH_AA_B1-B1pair-C1'pair":3, "cHH_AA_C4'-C1'-B1-B1pair":3, "cHH_AA_B1-B1pair-C1'pair-C4'pair":3, "cHH_AA_alpha_1":2, "cHH_AA_alpha_2":3, "cHH_AA_dB1":3, "cHH_AA_dB2":1,
"tHH_AA_tips_distance":2, "tHH_AA_C1'-B1-B1pair":2, "tHH_AA_B1-B1pair-C1'pair":2, "tHH_AA_C4'-C1'-B1-B1pair":3, "tHH_AA_B1-B1pair-C1'pair-C4'pair":1, "tHH_AA_alpha_1":2, "tHH_AA_alpha_2":2, "tHH_AA_dB1":2, "tHH_AA_dB2":2,
"cSH_AA_tips_distance":2, "cSH_AA_C1'-B1-B1pair":2, "cSH_AA_B1-B1pair-C1'pair":1, "cSH_AA_C4'-C1'-B1-B1pair":3, "cSH_AA_B1-B1pair-C1'pair-C4'pair":2, "cSH_AA_alpha_1":2, "cSH_AA_alpha_2":2, "cSH_AA_dB1":4, "cSH_AA_dB2":1,
"tSH_AA_tips_distance":2, "tSH_AA_C1'-B1-B1pair":1, "tSH_AA_B1-B1pair-C1'pair":2, "tSH_AA_C4'-C1'-B1-B1pair":2, "tSH_AA_B1-B1pair-C1'pair-C4'pair":2, "tSH_AA_alpha_1":2, "tSH_AA_alpha_2":3, "tSH_AA_dB1":2, "tSH_AA_dB2":2,
"cHS_AA_tips_distance":3, "cHS_AA_C1'-B1-B1pair":2, "cHS_AA_B1-B1pair-C1'pair":2, "cHS_AA_C4'-C1'-B1-B1pair":2, "cHS_AA_B1-B1pair-C1'pair-C4'pair":1, "cHS_AA_alpha_1":2, "cHS_AA_alpha_2":2, "cHS_AA_dB1":1, "cHS_AA_dB2":4,
"tHS_AA_tips_distance":4, "tHS_AA_C1'-B1-B1pair":2, "tHS_AA_B1-B1pair-C1'pair":2, "tHS_AA_C4'-C1'-B1-B1pair":2, "tHS_AA_B1-B1pair-C1'pair-C4'pair":1, "tHS_AA_alpha_1":2, "tHS_AA_alpha_2":1, "tHS_AA_dB1":2, "tHS_AA_dB2":1,
"cSS_AA_tips_distance":6, "cSS_AA_C1'-B1-B1pair":3, "cSS_AA_B1-B1pair-C1'pair":3, "cSS_AA_C4'-C1'-B1-B1pair":2, "cSS_AA_B1-B1pair-C1'pair-C4'pair":2, "cSS_AA_alpha_1":3, "cSS_AA_alpha_2":3, "cSS_AA_dB1":3, "cSS_AA_dB2":5,
"tSS_AA_tips_distance":5, "tSS_AA_C1'-B1-B1pair":1, "tSS_AA_B1-B1pair-C1'pair":1, "tSS_AA_C4'-C1'-B1-B1pair":2, "tSS_AA_B1-B1pair-C1'pair-C4'pair":1, "tSS_AA_alpha_1":3, "tSS_AA_alpha_2":1, "tSS_AA_dB1":4, "tSS_AA_dB2":2,
"cWW_AC_tips_distance":2, "cWW_AC_C1'-B1-B1pair":1, "cWW_AC_B1-B1pair-C1'pair":2, "cWW_AC_C4'-C1'-B1-B1pair":2, "cWW_AC_B1-B1pair-C1'pair-C4'pair":2, "cWW_AC_alpha_1":1, "cWW_AC_alpha_2":2, "cWW_AC_dB1":3, "cWW_AC_dB2":3,
"tWW_AC_tips_distance":2, "tWW_AC_C1'-B1-B1pair":3, "tWW_AC_B1-B1pair-C1'pair":2, "tWW_AC_C4'-C1'-B1-B1pair":3, "tWW_AC_B1-B1pair-C1'pair-C4'pair":3, "tWW_AC_alpha_1":3, "tWW_AC_alpha_2":2, "tWW_AC_dB1":4, "tWW_AC_dB2":3,
"cWH_AC_tips_distance":5, "cWH_AC_C1'-B1-B1pair":2, "cWH_AC_B1-B1pair-C1'pair":2, "cWH_AC_C4'-C1'-B1-B1pair":1, "cWH_AC_B1-B1pair-C1'pair-C4'pair":2, "cWH_AC_alpha_1":2, "cWH_AC_alpha_2":2, "cWH_AC_dB1":4, "cWH_AC_dB2":4,
"tWH_AC_tips_distance":8, "tWH_AC_C1'-B1-B1pair":1, "tWH_AC_B1-B1pair-C1'pair":2, "tWH_AC_C4'-C1'-B1-B1pair":2, "tWH_AC_B1-B1pair-C1'pair-C4'pair":3, "tWH_AC_alpha_1":2, "tWH_AC_alpha_2":2, "tWH_AC_dB1":3, "tWH_AC_dB2":3,
"cHW_AC_tips_distance":2, "cHW_AC_C1'-B1-B1pair":2, "cHW_AC_B1-B1pair-C1'pair":2, "cHW_AC_C4'-C1'-B1-B1pair":3, "cHW_AC_B1-B1pair-C1'pair-C4'pair":2, "cHW_AC_alpha_1":2, "cHW_AC_alpha_2":3, "cHW_AC_dB1":2, "cHW_AC_dB2":5,
"tHW_AC_tips_distance":3, "tHW_AC_C1'-B1-B1pair":2, "tHW_AC_B1-B1pair-C1'pair":3, "tHW_AC_C4'-C1'-B1-B1pair":3, "tHW_AC_B1-B1pair-C1'pair-C4'pair":2, "tHW_AC_alpha_1":2, "tHW_AC_alpha_2":2, "tHW_AC_dB1":3, "tHW_AC_dB2":3,
"cWS_AC_tips_distance":3, "cWS_AC_C1'-B1-B1pair":2, "cWS_AC_B1-B1pair-C1'pair":1, "cWS_AC_C4'-C1'-B1-B1pair":2, "cWS_AC_B1-B1pair-C1'pair-C4'pair":1, "cWS_AC_alpha_1":2, "cWS_AC_alpha_2":1, "cWS_AC_dB1":1, "cWS_AC_dB2":1,
"tWS_AC_tips_distance":4, "tWS_AC_C1'-B1-B1pair":2, "tWS_AC_B1-B1pair-C1'pair":1, "tWS_AC_C4'-C1'-B1-B1pair":2, "tWS_AC_B1-B1pair-C1'pair-C4'pair":2, "tWS_AC_alpha_1":3, "tWS_AC_alpha_2":1, "tWS_AC_dB1":3, "tWS_AC_dB2":2,
"cSW_AC_tips_distance":6, "cSW_AC_C1'-B1-B1pair":2, "cSW_AC_B1-B1pair-C1'pair":2, "cSW_AC_C4'-C1'-B1-B1pair":2, "cSW_AC_B1-B1pair-C1'pair-C4'pair":2, "cSW_AC_alpha_1":3, "cSW_AC_alpha_2":2, "cSW_AC_dB1":2, "cSW_AC_dB2":3,
"tSW_AC_tips_distance":5, "tSW_AC_C1'-B1-B1pair":1, "tSW_AC_B1-B1pair-C1'pair":2, "tSW_AC_C4'-C1'-B1-B1pair":1, "tSW_AC_B1-B1pair-C1'pair-C4'pair":2, "tSW_AC_alpha_1":1, "tSW_AC_alpha_2":2, "tSW_AC_dB1":2, "tSW_AC_dB2":3,
"cHH_AC_tips_distance":5, "cHH_AC_C1'-B1-B1pair":2, "cHH_AC_B1-B1pair-C1'pair":2, "cHH_AC_C4'-C1'-B1-B1pair":2, "cHH_AC_B1-B1pair-C1'pair-C4'pair":1, "cHH_AC_alpha_1":3, "cHH_AC_alpha_2":3, "cHH_AC_dB1":3, "cHH_AC_dB2":4,
"tHH_AC_tips_distance":4, "tHH_AC_C1'-B1-B1pair":1, "tHH_AC_B1-B1pair-C1'pair":2, "tHH_AC_C4'-C1'-B1-B1pair":2, "tHH_AC_B1-B1pair-C1'pair-C4'pair":3, "tHH_AC_alpha_1":2, "tHH_AC_alpha_2":2, "tHH_AC_dB1":4, "tHH_AC_dB2":3,
"cSH_AC_tips_distance":3, "cSH_AC_C1'-B1-B1pair":1, "cSH_AC_B1-B1pair-C1'pair":3, "cSH_AC_C4'-C1'-B1-B1pair":1, "cSH_AC_B1-B1pair-C1'pair-C4'pair":2, "cSH_AC_alpha_1":1, "cSH_AC_alpha_2":1, "cSH_AC_dB1":2, "cSH_AC_dB2":6,
"tSH_AC_tips_distance":8, "tSH_AC_C1'-B1-B1pair":3, "tSH_AC_B1-B1pair-C1'pair":2, "tSH_AC_C4'-C1'-B1-B1pair":1, "tSH_AC_B1-B1pair-C1'pair-C4'pair":2, "tSH_AC_alpha_1":2, "tSH_AC_alpha_2":3, "tSH_AC_dB1":1, "tSH_AC_dB2":2,
"cHS_AC_tips_distance":4, "cHS_AC_C1'-B1-B1pair":1, "cHS_AC_B1-B1pair-C1'pair":1, "cHS_AC_C4'-C1'-B1-B1pair":2, "cHS_AC_B1-B1pair-C1'pair-C4'pair":1, "cHS_AC_alpha_1":1, "cHS_AC_alpha_2":1, "cHS_AC_dB1":3, "cHS_AC_dB2":2,
"tHS_AC_tips_distance":8, "tHS_AC_C1'-B1-B1pair":1, "tHS_AC_B1-B1pair-C1'pair":2, "tHS_AC_C4'-C1'-B1-B1pair":2, "tHS_AC_B1-B1pair-C1'pair-C4'pair":2, "tHS_AC_alpha_1":1, "tHS_AC_alpha_2":1, "tHS_AC_dB1":1, "tHS_AC_dB2":1,
"cSS_AC_tips_distance":2, "cSS_AC_C1'-B1-B1pair":2, "cSS_AC_B1-B1pair-C1'pair":2, "cSS_AC_C4'-C1'-B1-B1pair":1, "cSS_AC_B1-B1pair-C1'pair-C4'pair":1, "cSS_AC_alpha_1":2, "cSS_AC_alpha_2":1, "cSS_AC_dB1":1, "cSS_AC_dB2":5,
"tSS_AC_tips_distance":5, "tSS_AC_C1'-B1-B1pair":2, "tSS_AC_B1-B1pair-C1'pair":2, "tSS_AC_C4'-C1'-B1-B1pair":1, "tSS_AC_B1-B1pair-C1'pair-C4'pair":2, "tSS_AC_alpha_1":2, "tSS_AC_alpha_2":2, "tSS_AC_dB1":3, "tSS_AC_dB2":5,
"cWW_AG_tips_distance":3, "cWW_AG_C1'-B1-B1pair":1, "cWW_AG_B1-B1pair-C1'pair":1, "cWW_AG_C4'-C1'-B1-B1pair":2, "cWW_AG_B1-B1pair-C1'pair-C4'pair":2, "cWW_AG_alpha_1":1, "cWW_AG_alpha_2":1, "cWW_AG_dB1":1, "cWW_AG_dB2":1,
"tWW_AG_tips_distance":5, "tWW_AG_C1'-B1-B1pair":1, "tWW_AG_B1-B1pair-C1'pair":1, "tWW_AG_C4'-C1'-B1-B1pair":2, "tWW_AG_B1-B1pair-C1'pair-C4'pair":2, "tWW_AG_alpha_1":2, "tWW_AG_alpha_2":2, "tWW_AG_dB1":2, "tWW_AG_dB2":3,
"cWH_AG_tips_distance":4, "cWH_AG_C1'-B1-B1pair":1, "cWH_AG_B1-B1pair-C1'pair":1, "cWH_AG_C4'-C1'-B1-B1pair":2, "cWH_AG_B1-B1pair-C1'pair-C4'pair":2, "cWH_AG_alpha_1":3, "cWH_AG_alpha_2":1, "cWH_AG_dB1":2, "cWH_AG_dB2":1,
"tWH_AG_tips_distance":3, "tWH_AG_C1'-B1-B1pair":1, "tWH_AG_B1-B1pair-C1'pair":1, "tWH_AG_C4'-C1'-B1-B1pair":2, "tWH_AG_B1-B1pair-C1'pair-C4'pair":2, "tWH_AG_alpha_1":2, "tWH_AG_alpha_2":1, "tWH_AG_dB1":2, "tWH_AG_dB2":1,
"cHW_AG_tips_distance":2, "cHW_AG_C1'-B1-B1pair":2, "cHW_AG_B1-B1pair-C1'pair":1, "cHW_AG_C4'-C1'-B1-B1pair":2, "cHW_AG_B1-B1pair-C1'pair-C4'pair":1, "cHW_AG_alpha_1":1, "cHW_AG_alpha_2":2, "cHW_AG_dB1":2, "cHW_AG_dB2":2,
"tHW_AG_tips_distance":3, "tHW_AG_C1'-B1-B1pair":2, "tHW_AG_B1-B1pair-C1'pair":2, "tHW_AG_C4'-C1'-B1-B1pair":2, "tHW_AG_B1-B1pair-C1'pair-C4'pair":2, "tHW_AG_alpha_1":2, "tHW_AG_alpha_2":2, "tHW_AG_dB1":2, "tHW_AG_dB2":2,
"cWS_AG_tips_distance":1, "cWS_AG_C1'-B1-B1pair":3, "cWS_AG_B1-B1pair-C1'pair":1, "cWS_AG_C4'-C1'-B1-B1pair":1, "cWS_AG_B1-B1pair-C1'pair-C4'pair":1, "cWS_AG_alpha_1":2, "cWS_AG_alpha_2":2, "cWS_AG_dB1":2, "cWS_AG_dB2":1,
"tWS_AG_tips_distance":6, "tWS_AG_C1'-B1-B1pair":1, "tWS_AG_B1-B1pair-C1'pair":2, "tWS_AG_C4'-C1'-B1-B1pair":2, "tWS_AG_B1-B1pair-C1'pair-C4'pair":1, "tWS_AG_alpha_1":2, "tWS_AG_alpha_2":2, "tWS_AG_dB1":1, "tWS_AG_dB2":3,
"cSW_AG_tips_distance":4, "cSW_AG_C1'-B1-B1pair":1, "cSW_AG_B1-B1pair-C1'pair":2, "cSW_AG_C4'-C1'-B1-B1pair":1, "cSW_AG_B1-B1pair-C1'pair-C4'pair":2, "cSW_AG_alpha_1":1, "cSW_AG_alpha_2":2, "cSW_AG_dB1":3, "cSW_AG_dB2":1,
"tSW_AG_tips_distance":7, "tSW_AG_C1'-B1-B1pair":3, "tSW_AG_B1-B1pair-C1'pair":2, "tSW_AG_C4'-C1'-B1-B1pair":2, "tSW_AG_B1-B1pair-C1'pair-C4'pair":2, "tSW_AG_alpha_1":2, "tSW_AG_alpha_2":2, "tSW_AG_dB1":3, "tSW_AG_dB2":3,
"cHH_AG_tips_distance":2, "cHH_AG_C1'-B1-B1pair":2, "cHH_AG_B1-B1pair-C1'pair":4, "cHH_AG_C4'-C1'-B1-B1pair":3, "cHH_AG_B1-B1pair-C1'pair-C4'pair":2, "cHH_AG_alpha_1":2, "cHH_AG_alpha_2":3, "cHH_AG_dB1":1, "cHH_AG_dB2":2,
"tHH_AG_tips_distance":8, "tHH_AG_C1'-B1-B1pair":3, "tHH_AG_B1-B1pair-C1'pair":3, "tHH_AG_C4'-C1'-B1-B1pair":3, "tHH_AG_B1-B1pair-C1'pair-C4'pair":2, "tHH_AG_alpha_1":3, "tHH_AG_alpha_2":3, "tHH_AG_dB1":1, "tHH_AG_dB2":2,
"cSH_AG_tips_distance":5, "cSH_AG_C1'-B1-B1pair":2, "cSH_AG_B1-B1pair-C1'pair":2, "cSH_AG_C4'-C1'-B1-B1pair":2, "cSH_AG_B1-B1pair-C1'pair-C4'pair":2, "cSH_AG_alpha_1":3, "cSH_AG_alpha_2":1, "cSH_AG_dB1":1, "cSH_AG_dB2":3,
"tSH_AG_tips_distance":5, "tSH_AG_C1'-B1-B1pair":2, "tSH_AG_B1-B1pair-C1'pair":2, "tSH_AG_C4'-C1'-B1-B1pair":2, "tSH_AG_B1-B1pair-C1'pair-C4'pair":3, "tSH_AG_alpha_1":2, "tSH_AG_alpha_2":4, "tSH_AG_dB1":3, "tSH_AG_dB2":2,
"cHS_AG_tips_distance":1, "cHS_AG_C1'-B1-B1pair":3, "cHS_AG_B1-B1pair-C1'pair":1, "cHS_AG_C4'-C1'-B1-B1pair":3, "cHS_AG_B1-B1pair-C1'pair-C4'pair":1, "cHS_AG_alpha_1":2, "cHS_AG_alpha_2":3, "cHS_AG_dB1":1, "cHS_AG_dB2":2,
"tHS_AG_tips_distance":6, "tHS_AG_C1'-B1-B1pair":1, "tHS_AG_B1-B1pair-C1'pair":2, "tHS_AG_C4'-C1'-B1-B1pair":2, "tHS_AG_B1-B1pair-C1'pair-C4'pair":2, "tHS_AG_alpha_1":1, "tHS_AG_alpha_2":2, "tHS_AG_dB1":2, "tHS_AG_dB2":1,
"cSS_AG_tips_distance":2, "cSS_AG_C1'-B1-B1pair":2, "cSS_AG_B1-B1pair-C1'pair":2, "cSS_AG_C4'-C1'-B1-B1pair":2, "cSS_AG_B1-B1pair-C1'pair-C4'pair":1, "cSS_AG_alpha_1":2, "cSS_AG_alpha_2":1, "cSS_AG_dB1":2, "cSS_AG_dB2":4,
"tSS_AG_tips_distance":4, "tSS_AG_C1'-B1-B1pair":3, "tSS_AG_B1-B1pair-C1'pair":1, "tSS_AG_C4'-C1'-B1-B1pair":2, "tSS_AG_B1-B1pair-C1'pair-C4'pair":1, "tSS_AG_alpha_1":2, "tSS_AG_alpha_2":1, "tSS_AG_dB1":2, "tSS_AG_dB2":4,
"cWW_AU_tips_distance":3, "cWW_AU_C1'-B1-B1pair":1, "cWW_AU_B1-B1pair-C1'pair":2, "cWW_AU_C4'-C1'-B1-B1pair":3, "cWW_AU_B1-B1pair-C1'pair-C4'pair":2, "cWW_AU_alpha_1":3, "cWW_AU_alpha_2":1, "cWW_AU_dB1":4, "cWW_AU_dB2":2,
"tWW_AU_tips_distance":3, "tWW_AU_C1'-B1-B1pair":3, "tWW_AU_B1-B1pair-C1'pair":3, "tWW_AU_C4'-C1'-B1-B1pair":2, "tWW_AU_B1-B1pair-C1'pair-C4'pair":2, "tWW_AU_alpha_1":3, "tWW_AU_alpha_2":2, "tWW_AU_dB1":3, "tWW_AU_dB2":2,
"cWH_AU_tips_distance":5, "cWH_AU_C1'-B1-B1pair":2, "cWH_AU_B1-B1pair-C1'pair":2, "cWH_AU_C4'-C1'-B1-B1pair":2, "cWH_AU_B1-B1pair-C1'pair-C4'pair":2, "cWH_AU_alpha_1":1, "cWH_AU_alpha_2":3, "cWH_AU_dB1":3, "cWH_AU_dB2":3,
"tWH_AU_tips_distance":6, "tWH_AU_C1'-B1-B1pair":1, "tWH_AU_B1-B1pair-C1'pair":3, "tWH_AU_C4'-C1'-B1-B1pair":2, "tWH_AU_B1-B1pair-C1'pair-C4'pair":2, "tWH_AU_alpha_1":2, "tWH_AU_alpha_2":2, "tWH_AU_dB1":1, "tWH_AU_dB2":3,
"cHW_AU_tips_distance":3, "cHW_AU_C1'-B1-B1pair":3, "cHW_AU_B1-B1pair-C1'pair":3, "cHW_AU_C4'-C1'-B1-B1pair":2, "cHW_AU_B1-B1pair-C1'pair-C4'pair":2, "cHW_AU_alpha_1":1, "cHW_AU_alpha_2":2, "cHW_AU_dB1":2, "cHW_AU_dB2":2,
"tHW_AU_tips_distance":3, "tHW_AU_C1'-B1-B1pair":2, "tHW_AU_B1-B1pair-C1'pair":2, "tHW_AU_C4'-C1'-B1-B1pair":2, "tHW_AU_B1-B1pair-C1'pair-C4'pair":2, "tHW_AU_alpha_1":2, "tHW_AU_alpha_2":1, "tHW_AU_dB1":1, "tHW_AU_dB2":4,
"cWS_AU_tips_distance":2, "cWS_AU_C1'-B1-B1pair":1, "cWS_AU_B1-B1pair-C1'pair":1, "cWS_AU_C4'-C1'-B1-B1pair":2, "cWS_AU_B1-B1pair-C1'pair-C4'pair":1, "cWS_AU_alpha_1":2, "cWS_AU_alpha_2":2, "cWS_AU_dB1":2, "cWS_AU_dB2":5,
"tWS_AU_tips_distance":2, "tWS_AU_C1'-B1-B1pair":2, "tWS_AU_B1-B1pair-C1'pair":2, "tWS_AU_C4'-C1'-B1-B1pair":2, "tWS_AU_B1-B1pair-C1'pair-C4'pair":1, "tWS_AU_alpha_1":2, "tWS_AU_alpha_2":2, "tWS_AU_dB1":3, "tWS_AU_dB2":4,
"cSW_AU_tips_distance":2, "cSW_AU_C1'-B1-B1pair":3, "cSW_AU_B1-B1pair-C1'pair":2, "cSW_AU_C4'-C1'-B1-B1pair":2, "cSW_AU_B1-B1pair-C1'pair-C4'pair":2, "cSW_AU_alpha_1":3, "cSW_AU_alpha_2":2, "cSW_AU_dB1":2, "cSW_AU_dB2":3,
"tSW_AU_tips_distance":3, "tSW_AU_C1'-B1-B1pair":2, "tSW_AU_B1-B1pair-C1'pair":3, "tSW_AU_C4'-C1'-B1-B1pair":3, "tSW_AU_B1-B1pair-C1'pair-C4'pair":2, "tSW_AU_alpha_1":2, "tSW_AU_alpha_2":1, "tSW_AU_dB1":3, "tSW_AU_dB2":4,
"cHH_AU_tips_distance":6, "cHH_AU_C1'-B1-B1pair":2, "cHH_AU_B1-B1pair-C1'pair":1, "cHH_AU_C4'-C1'-B1-B1pair":2, "cHH_AU_B1-B1pair-C1'pair-C4'pair":1, "cHH_AU_alpha_1":2, "cHH_AU_alpha_2":2, "cHH_AU_dB1":1, "cHH_AU_dB2":2,
"tHH_AU_tips_distance":8, "tHH_AU_C1'-B1-B1pair":3, "tHH_AU_B1-B1pair-C1'pair":3, "tHH_AU_C4'-C1'-B1-B1pair":3, "tHH_AU_B1-B1pair-C1'pair-C4'pair":2, "tHH_AU_alpha_1":3, "tHH_AU_alpha_2":3, "tHH_AU_dB1":1, "tHH_AU_dB2":3,
"cSH_AU_tips_distance":5, "cSH_AU_C1'-B1-B1pair":1, "cSH_AU_B1-B1pair-C1'pair":3, "cSH_AU_C4'-C1'-B1-B1pair":3, "cSH_AU_B1-B1pair-C1'pair-C4'pair":2, "cSH_AU_alpha_1":2, "cSH_AU_alpha_2":1, "cSH_AU_dB1":4, "cSH_AU_dB2":4,
"tSH_AU_tips_distance":5, "tSH_AU_C1'-B1-B1pair":3, "tSH_AU_B1-B1pair-C1'pair":1, "tSH_AU_C4'-C1'-B1-B1pair":1, "tSH_AU_B1-B1pair-C1'pair-C4'pair":2, "tSH_AU_alpha_1":3, "tSH_AU_alpha_2":3, "tSH_AU_dB1":3, "tSH_AU_dB2":4,
"cHS_AU_tips_distance":2, "cHS_AU_C1'-B1-B1pair":3, "cHS_AU_B1-B1pair-C1'pair":1, "cHS_AU_C4'-C1'-B1-B1pair":2, "cHS_AU_B1-B1pair-C1'pair-C4'pair":2, "cHS_AU_alpha_1":2, "cHS_AU_alpha_2":2, "cHS_AU_dB1":1, "cHS_AU_dB2":3,
"tHS_AU_tips_distance":2, "tHS_AU_C1'-B1-B1pair":2, "tHS_AU_B1-B1pair-C1'pair":2, "tHS_AU_C4'-C1'-B1-B1pair":2, "tHS_AU_B1-B1pair-C1'pair-C4'pair":3, "tHS_AU_alpha_1":3, "tHS_AU_alpha_2":2, "tHS_AU_dB1":3, "tHS_AU_dB2":3,
"cSS_AU_tips_distance":3, "cSS_AU_C1'-B1-B1pair":2, "cSS_AU_B1-B1pair-C1'pair":2, "cSS_AU_C4'-C1'-B1-B1pair":1, "cSS_AU_B1-B1pair-C1'pair-C4'pair":2, "cSS_AU_alpha_1":3, "cSS_AU_alpha_2":2, "cSS_AU_dB1":1, "cSS_AU_dB2":4,
"tSS_AU_tips_distance":5, "tSS_AU_C1'-B1-B1pair":2, "tSS_AU_B1-B1pair-C1'pair":1, "tSS_AU_C4'-C1'-B1-B1pair":3, "tSS_AU_B1-B1pair-C1'pair-C4'pair":2, "tSS_AU_alpha_1":2, "tSS_AU_alpha_2":3, "tSS_AU_dB1":3, "tSS_AU_dB2":8,
"cWW_CA_tips_distance":2, "cWW_CA_C1'-B1-B1pair":2, "cWW_CA_B1-B1pair-C1'pair":1, "cWW_CA_C4'-C1'-B1-B1pair":2, "cWW_CA_B1-B1pair-C1'pair-C4'pair":2, "cWW_CA_alpha_1":1, "cWW_CA_alpha_2":2, "cWW_CA_dB1":1, "cWW_CA_dB2":1,
"tWW_CA_tips_distance":4, "tWW_CA_C1'-B1-B1pair":2, "tWW_CA_B1-B1pair-C1'pair":2, "tWW_CA_C4'-C1'-B1-B1pair":3, "tWW_CA_B1-B1pair-C1'pair-C4'pair":2, "tWW_CA_alpha_1":2, "tWW_CA_alpha_2":1, "tWW_CA_dB1":4, "tWW_CA_dB2":2,
"cWH_CA_tips_distance":3, "cWH_CA_C1'-B1-B1pair":3, "cWH_CA_B1-B1pair-C1'pair":2, "cWH_CA_C4'-C1'-B1-B1pair":2, "cWH_CA_B1-B1pair-C1'pair-C4'pair":3, "cWH_CA_alpha_1":3, "cWH_CA_alpha_2":2, "cWH_CA_dB1":5, "cWH_CA_dB2":2,
"tWH_CA_tips_distance":5, "tWH_CA_C1'-B1-B1pair":1, "tWH_CA_B1-B1pair-C1'pair":1, "tWH_CA_C4'-C1'-B1-B1pair":2, "tWH_CA_B1-B1pair-C1'pair-C4'pair":2, "tWH_CA_alpha_1":3, "tWH_CA_alpha_2":1, "tWH_CA_dB1":3, "tWH_CA_dB2":2,
"cHW_CA_tips_distance":2, "cHW_CA_C1'-B1-B1pair":2, "cHW_CA_B1-B1pair-C1'pair":2, "cHW_CA_C4'-C1'-B1-B1pair":2, "cHW_CA_B1-B1pair-C1'pair-C4'pair":2, "cHW_CA_alpha_1":2, "cHW_CA_alpha_2":2, "cHW_CA_dB1":4, "cHW_CA_dB2":2,
"tHW_CA_tips_distance":2, "tHW_CA_C1'-B1-B1pair":2, "tHW_CA_B1-B1pair-C1'pair":2, "tHW_CA_C4'-C1'-B1-B1pair":2, "tHW_CA_B1-B1pair-C1'pair-C4'pair":2, "tHW_CA_alpha_1":2, "tHW_CA_alpha_2":2, "tHW_CA_dB1":6, "tHW_CA_dB2":2,
"cWS_CA_tips_distance":2, "cWS_CA_C1'-B1-B1pair":2, "cWS_CA_B1-B1pair-C1'pair":2, "cWS_CA_C4'-C1'-B1-B1pair":2, "cWS_CA_B1-B1pair-C1'pair-C4'pair":1, "cWS_CA_alpha_1":2, "cWS_CA_alpha_2":2, "cWS_CA_dB1":4, "cWS_CA_dB2":2,
"tWS_CA_tips_distance":5, "tWS_CA_C1'-B1-B1pair":3, "tWS_CA_B1-B1pair-C1'pair":1, "tWS_CA_C4'-C1'-B1-B1pair":3, "tWS_CA_B1-B1pair-C1'pair-C4'pair":2, "tWS_CA_alpha_1":3, "tWS_CA_alpha_2":1, "tWS_CA_dB1":1, "tWS_CA_dB2":1,
"cSW_CA_tips_distance":1, "cSW_CA_C1'-B1-B1pair":1, "cSW_CA_B1-B1pair-C1'pair":1, "cSW_CA_C4'-C1'-B1-B1pair":1, "cSW_CA_B1-B1pair-C1'pair-C4'pair":2, "cSW_CA_alpha_1":1, "cSW_CA_alpha_2":3, "cSW_CA_dB1":1, "cSW_CA_dB2":1,
"tSW_CA_tips_distance":3, "tSW_CA_C1'-B1-B1pair":2, "tSW_CA_B1-B1pair-C1'pair":2, "tSW_CA_C4'-C1'-B1-B1pair":1, "tSW_CA_B1-B1pair-C1'pair-C4'pair":1, "tSW_CA_alpha_1":2, "tSW_CA_alpha_2":3, "tSW_CA_dB1":3, "tSW_CA_dB2":1,
"cHH_CA_tips_distance":5, "cHH_CA_C1'-B1-B1pair":2, "cHH_CA_B1-B1pair-C1'pair":1, "cHH_CA_C4'-C1'-B1-B1pair":3, "cHH_CA_B1-B1pair-C1'pair-C4'pair":1, "cHH_CA_alpha_1":2, "cHH_CA_alpha_2":1, "cHH_CA_dB1":1, "cHH_CA_dB2":2,
"tHH_CA_tips_distance":1, "tHH_CA_C1'-B1-B1pair":2, "tHH_CA_B1-B1pair-C1'pair":2, "tHH_CA_C4'-C1'-B1-B1pair":3, "tHH_CA_B1-B1pair-C1'pair-C4'pair":3, "tHH_CA_alpha_1":2, "tHH_CA_alpha_2":1, "tHH_CA_dB1":3, "tHH_CA_dB2":5,
"cSH_CA_tips_distance":3, "cSH_CA_C1'-B1-B1pair":1, "cSH_CA_B1-B1pair-C1'pair":3, "cSH_CA_C4'-C1'-B1-B1pair":2, "cSH_CA_B1-B1pair-C1'pair-C4'pair":1, "cSH_CA_alpha_1":1, "cSH_CA_alpha_2":1, "cSH_CA_dB1":2, "cSH_CA_dB2":3,
"tSH_CA_tips_distance":2, "tSH_CA_C1'-B1-B1pair":1, "tSH_CA_B1-B1pair-C1'pair":2, "tSH_CA_C4'-C1'-B1-B1pair":2, "tSH_CA_B1-B1pair-C1'pair-C4'pair":2, "tSH_CA_alpha_1":3, "tSH_CA_alpha_2":2, "tSH_CA_dB1":6, "tSH_CA_dB2":4,
"cHS_CA_tips_distance":2, "cHS_CA_C1'-B1-B1pair":2, "cHS_CA_B1-B1pair-C1'pair":2, "cHS_CA_C4'-C1'-B1-B1pair":1, "cHS_CA_B1-B1pair-C1'pair-C4'pair":1, "cHS_CA_alpha_1":1, "cHS_CA_alpha_2":2, "cHS_CA_dB1":2, "cHS_CA_dB2":2,
"tHS_CA_tips_distance":3, "tHS_CA_C1'-B1-B1pair":2, "tHS_CA_B1-B1pair-C1'pair":1, "tHS_CA_C4'-C1'-B1-B1pair":2, "tHS_CA_B1-B1pair-C1'pair-C4'pair":2, "tHS_CA_alpha_1":3, "tHS_CA_alpha_2":3, "tHS_CA_dB1":2, "tHS_CA_dB2":1,
"cSS_CA_tips_distance":7, "cSS_CA_C1'-B1-B1pair":2, "cSS_CA_B1-B1pair-C1'pair":2, "cSS_CA_C4'-C1'-B1-B1pair":1, "cSS_CA_B1-B1pair-C1'pair-C4'pair":1, "cSS_CA_alpha_1":3, "cSS_CA_alpha_2":3, "cSS_CA_dB1":3, "cSS_CA_dB2":1,
"tSS_CA_tips_distance":5, "tSS_CA_C1'-B1-B1pair":2, "tSS_CA_B1-B1pair-C1'pair":2, "tSS_CA_C4'-C1'-B1-B1pair":2, "tSS_CA_B1-B1pair-C1'pair-C4'pair":1, "tSS_CA_alpha_1":2, "tSS_CA_alpha_2":2, "tSS_CA_dB1":4, "tSS_CA_dB2":2,
"cWW_CC_tips_distance":3, "cWW_CC_C1'-B1-B1pair":1, "cWW_CC_B1-B1pair-C1'pair":1, "cWW_CC_C4'-C1'-B1-B1pair":2, "cWW_CC_B1-B1pair-C1'pair-C4'pair":2, "cWW_CC_alpha_1":1, "cWW_CC_alpha_2":2, "cWW_CC_dB1":2, "cWW_CC_dB2":2,
"tWW_CC_tips_distance":6, "tWW_CC_C1'-B1-B1pair":3, "tWW_CC_B1-B1pair-C1'pair":3, "tWW_CC_C4'-C1'-B1-B1pair":3, "tWW_CC_B1-B1pair-C1'pair-C4'pair":3, "tWW_CC_alpha_1":2, "tWW_CC_alpha_2":2, "tWW_CC_dB1":6, "tWW_CC_dB2":3,
"cWH_CC_tips_distance":4, "cWH_CC_C1'-B1-B1pair":2, "cWH_CC_B1-B1pair-C1'pair":2, "cWH_CC_C4'-C1'-B1-B1pair":2, "cWH_CC_B1-B1pair-C1'pair-C4'pair":1, "cWH_CC_alpha_1":1, "cWH_CC_alpha_2":3, "cWH_CC_dB1":3, "cWH_CC_dB2":2,
"tWH_CC_tips_distance":1, "tWH_CC_C1'-B1-B1pair":1, "tWH_CC_B1-B1pair-C1'pair":3, "tWH_CC_C4'-C1'-B1-B1pair":2, "tWH_CC_B1-B1pair-C1'pair-C4'pair":1, "tWH_CC_alpha_1":3, "tWH_CC_alpha_2":1, "tWH_CC_dB1":3, "tWH_CC_dB2":3,
"cHW_CC_tips_distance":4, "cHW_CC_C1'-B1-B1pair":3, "cHW_CC_B1-B1pair-C1'pair":2, "cHW_CC_C4'-C1'-B1-B1pair":1, "cHW_CC_B1-B1pair-C1'pair-C4'pair":2, "cHW_CC_alpha_1":2, "cHW_CC_alpha_2":2, "cHW_CC_dB1":2, "cHW_CC_dB2":3,
"tHW_CC_tips_distance":2, "tHW_CC_C1'-B1-B1pair":1, "tHW_CC_B1-B1pair-C1'pair":3, "tHW_CC_C4'-C1'-B1-B1pair":3, "tHW_CC_B1-B1pair-C1'pair-C4'pair":2, "tHW_CC_alpha_1":2, "tHW_CC_alpha_2":2, "tHW_CC_dB1":3, "tHW_CC_dB2":3,
"cWS_CC_tips_distance":3, "cWS_CC_C1'-B1-B1pair":2, "cWS_CC_B1-B1pair-C1'pair":2, "cWS_CC_C4'-C1'-B1-B1pair":1, "cWS_CC_B1-B1pair-C1'pair-C4'pair":1, "cWS_CC_alpha_1":2, "cWS_CC_alpha_2":3, "cWS_CC_dB1":2, "cWS_CC_dB2":1,
"tWS_CC_tips_distance":5, "tWS_CC_C1'-B1-B1pair":2, "tWS_CC_B1-B1pair-C1'pair":2, "tWS_CC_C4'-C1'-B1-B1pair":2, "tWS_CC_B1-B1pair-C1'pair-C4'pair":1, "tWS_CC_alpha_1":2, "tWS_CC_alpha_2":2, "tWS_CC_dB1":2, "tWS_CC_dB2":2,
"cSW_CC_tips_distance":3, "cSW_CC_C1'-B1-B1pair":2, "cSW_CC_B1-B1pair-C1'pair":2, "cSW_CC_C4'-C1'-B1-B1pair":2, "cSW_CC_B1-B1pair-C1'pair-C4'pair":1, "cSW_CC_alpha_1":3, "cSW_CC_alpha_2":2, "cSW_CC_dB1":2, "cSW_CC_dB2":2,
"tSW_CC_tips_distance":5, "tSW_CC_C1'-B1-B1pair":1, "tSW_CC_B1-B1pair-C1'pair":2, "tSW_CC_C4'-C1'-B1-B1pair":1, "tSW_CC_B1-B1pair-C1'pair-C4'pair":2, "tSW_CC_alpha_1":1, "tSW_CC_alpha_2":2, "tSW_CC_dB1":3, "tSW_CC_dB2":2,
"cHH_CC_tips_distance":5, "cHH_CC_C1'-B1-B1pair":1, "cHH_CC_B1-B1pair-C1'pair":1, "cHH_CC_C4'-C1'-B1-B1pair":1, "cHH_CC_B1-B1pair-C1'pair-C4'pair":1, "cHH_CC_alpha_1":2, "cHH_CC_alpha_2":1, "cHH_CC_dB1":7, "cHH_CC_dB2":7,
"tHH_CC_tips_distance":5, "tHH_CC_C1'-B1-B1pair":3, "tHH_CC_B1-B1pair-C1'pair":2, "tHH_CC_C4'-C1'-B1-B1pair":3, "tHH_CC_B1-B1pair-C1'pair-C4'pair":2, "tHH_CC_alpha_1":1, "tHH_CC_alpha_2":3, "tHH_CC_dB1":5, "tHH_CC_dB2":5,
"cSH_CC_tips_distance":3, "cSH_CC_C1'-B1-B1pair":2, "cSH_CC_B1-B1pair-C1'pair":2, "cSH_CC_C4'-C1'-B1-B1pair":2, "cSH_CC_B1-B1pair-C1'pair-C4'pair":2, "cSH_CC_alpha_1":3, "cSH_CC_alpha_2":2, "cSH_CC_dB1":5, "cSH_CC_dB2":2,
"tSH_CC_tips_distance":5, "tSH_CC_C1'-B1-B1pair":2, "tSH_CC_B1-B1pair-C1'pair":1, "tSH_CC_C4'-C1'-B1-B1pair":2, "tSH_CC_B1-B1pair-C1'pair-C4'pair":2, "tSH_CC_alpha_1":3, "tSH_CC_alpha_2":1, "tSH_CC_dB1":4, "tSH_CC_dB2":2,
"cHS_CC_tips_distance":3, "cHS_CC_C1'-B1-B1pair":2, "cHS_CC_B1-B1pair-C1'pair":2, "cHS_CC_C4'-C1'-B1-B1pair":2, "cHS_CC_B1-B1pair-C1'pair-C4'pair":2, "cHS_CC_alpha_1":3, "cHS_CC_alpha_2":2, "cHS_CC_dB1":2, "cHS_CC_dB2":2,
"tHS_CC_tips_distance":5, "tHS_CC_C1'-B1-B1pair":3, "tHS_CC_B1-B1pair-C1'pair":1, "tHS_CC_C4'-C1'-B1-B1pair":2, "tHS_CC_B1-B1pair-C1'pair-C4'pair":3, "tHS_CC_alpha_1":1, "tHS_CC_alpha_2":2, "tHS_CC_dB1":4, "tHS_CC_dB2":4,
"cSS_CC_tips_distance":5, "cSS_CC_C1'-B1-B1pair":2, "cSS_CC_B1-B1pair-C1'pair":2, "cSS_CC_C4'-C1'-B1-B1pair":2, "cSS_CC_B1-B1pair-C1'pair-C4'pair":1, "cSS_CC_alpha_1":1, "cSS_CC_alpha_2":3, "cSS_CC_dB1":1, "cSS_CC_dB2":3,
"tSS_CC_tips_distance":5, "tSS_CC_C1'-B1-B1pair":2, "tSS_CC_B1-B1pair-C1'pair":2, "tSS_CC_C4'-C1'-B1-B1pair":3, "tSS_CC_B1-B1pair-C1'pair-C4'pair":2, "tSS_CC_alpha_1":3, "tSS_CC_alpha_2":2, "tSS_CC_dB1":2, "tSS_CC_dB2":1,
"cWW_CG_tips_distance":5, "cWW_CG_C1'-B1-B1pair":2, "cWW_CG_B1-B1pair-C1'pair":1, "cWW_CG_C4'-C1'-B1-B1pair":2, "cWW_CG_B1-B1pair-C1'pair-C4'pair":2, "cWW_CG_alpha_1":2, "cWW_CG_alpha_2":3, "cWW_CG_dB1":2, "cWW_CG_dB2":2,
"tWW_CG_tips_distance":3, "tWW_CG_C1'-B1-B1pair":1, "tWW_CG_B1-B1pair-C1'pair":2, "tWW_CG_C4'-C1'-B1-B1pair":2, "tWW_CG_B1-B1pair-C1'pair-C4'pair":2, "tWW_CG_alpha_1":2, "tWW_CG_alpha_2":1, "tWW_CG_dB1":1, "tWW_CG_dB2":4,
"cWH_CG_tips_distance":3, "cWH_CG_C1'-B1-B1pair":1, "cWH_CG_B1-B1pair-C1'pair":1, "cWH_CG_C4'-C1'-B1-B1pair":2, "cWH_CG_B1-B1pair-C1'pair-C4'pair":2, "cWH_CG_alpha_1":2, "cWH_CG_alpha_2":1, "cWH_CG_dB1":4, "cWH_CG_dB2":2,
"tWH_CG_tips_distance":4, "tWH_CG_C1'-B1-B1pair":2, "tWH_CG_B1-B1pair-C1'pair":1, "tWH_CG_C4'-C1'-B1-B1pair":2, "tWH_CG_B1-B1pair-C1'pair-C4'pair":3, "tWH_CG_alpha_1":2, "tWH_CG_alpha_2":1, "tWH_CG_dB1":3, "tWH_CG_dB2":2,
"cHW_CG_tips_distance":3, "cHW_CG_C1'-B1-B1pair":2, "cHW_CG_B1-B1pair-C1'pair":2, "cHW_CG_C4'-C1'-B1-B1pair":1, "cHW_CG_B1-B1pair-C1'pair-C4'pair":2, "cHW_CG_alpha_1":1, "cHW_CG_alpha_2":2, "cHW_CG_dB1":2, "cHW_CG_dB2":2,
"tHW_CG_tips_distance":5, "tHW_CG_C1'-B1-B1pair":1, "tHW_CG_B1-B1pair-C1'pair":2, "tHW_CG_C4'-C1'-B1-B1pair":1, "tHW_CG_B1-B1pair-C1'pair-C4'pair":2, "tHW_CG_alpha_1":3, "tHW_CG_alpha_2":2, "tHW_CG_dB1":4, "tHW_CG_dB2":3,
"cWS_CG_tips_distance":2, "cWS_CG_C1'-B1-B1pair":1, "cWS_CG_B1-B1pair-C1'pair":1, "cWS_CG_C4'-C1'-B1-B1pair":2, "cWS_CG_B1-B1pair-C1'pair-C4'pair":1, "cWS_CG_alpha_1":1, "cWS_CG_alpha_2":2, "cWS_CG_dB1":2, "cWS_CG_dB2":3,
"tWS_CG_tips_distance":2, "tWS_CG_C1'-B1-B1pair":3, "tWS_CG_B1-B1pair-C1'pair":1, "tWS_CG_C4'-C1'-B1-B1pair":2, "tWS_CG_B1-B1pair-C1'pair-C4'pair":1, "tWS_CG_alpha_1":2, "tWS_CG_alpha_2":1, "tWS_CG_dB1":2, "tWS_CG_dB2":4,
"cSW_CG_tips_distance":7, "cSW_CG_C1'-B1-B1pair":1, "cSW_CG_B1-B1pair-C1'pair":2, "cSW_CG_C4'-C1'-B1-B1pair":2, "cSW_CG_B1-B1pair-C1'pair-C4'pair":3, "cSW_CG_alpha_1":1, "cSW_CG_alpha_2":2, "cSW_CG_dB1":1, "cSW_CG_dB2":3,
"tSW_CG_tips_distance":4, "tSW_CG_C1'-B1-B1pair":1, "tSW_CG_B1-B1pair-C1'pair":2, "tSW_CG_C4'-C1'-B1-B1pair":3, "tSW_CG_B1-B1pair-C1'pair-C4'pair":2, "tSW_CG_alpha_1":1, "tSW_CG_alpha_2":2, "tSW_CG_dB1":7, "tSW_CG_dB2":2,
"cHH_CG_tips_distance":1, "cHH_CG_C1'-B1-B1pair":1, "cHH_CG_B1-B1pair-C1'pair":2, "cHH_CG_C4'-C1'-B1-B1pair":3, "cHH_CG_B1-B1pair-C1'pair-C4'pair":2, "cHH_CG_alpha_1":1, "cHH_CG_alpha_2":2, "cHH_CG_dB1":4, "cHH_CG_dB2":1,
"tHH_CG_tips_distance":8, "tHH_CG_C1'-B1-B1pair":2, "tHH_CG_B1-B1pair-C1'pair":2, "tHH_CG_C4'-C1'-B1-B1pair":3, "tHH_CG_B1-B1pair-C1'pair-C4'pair":2, "tHH_CG_alpha_1":2, "tHH_CG_alpha_2":3, "tHH_CG_dB1":3, "tHH_CG_dB2":4,
"cSH_CG_tips_distance":5, "cSH_CG_C1'-B1-B1pair":1, "cSH_CG_B1-B1pair-C1'pair":2, "cSH_CG_C4'-C1'-B1-B1pair":2, "cSH_CG_B1-B1pair-C1'pair-C4'pair":2, "cSH_CG_alpha_1":1, "cSH_CG_alpha_2":2, "cSH_CG_dB1":6, "cSH_CG_dB2":4,
"tSH_CG_tips_distance":5, "tSH_CG_C1'-B1-B1pair":1, "tSH_CG_B1-B1pair-C1'pair":2, "tSH_CG_C4'-C1'-B1-B1pair":2, "tSH_CG_B1-B1pair-C1'pair-C4'pair":1, "tSH_CG_alpha_1":1, "tSH_CG_alpha_2":3, "tSH_CG_dB1":2, "tSH_CG_dB2":3,
"cHS_CG_tips_distance":4, "cHS_CG_C1'-B1-B1pair":2, "cHS_CG_B1-B1pair-C1'pair":2, "cHS_CG_C4'-C1'-B1-B1pair":3, "cHS_CG_B1-B1pair-C1'pair-C4'pair":2, "cHS_CG_alpha_1":2, "cHS_CG_alpha_2":3, "cHS_CG_dB1":5, "cHS_CG_dB2":2,
"tHS_CG_tips_distance":4, "tHS_CG_C1'-B1-B1pair":1, "tHS_CG_B1-B1pair-C1'pair":2, "tHS_CG_C4'-C1'-B1-B1pair":3, "tHS_CG_B1-B1pair-C1'pair-C4'pair":1, "tHS_CG_alpha_1":1, "tHS_CG_alpha_2":1, "tHS_CG_dB1":3, "tHS_CG_dB2":2,
"cSS_CG_tips_distance":1, "cSS_CG_C1'-B1-B1pair":2, "cSS_CG_B1-B1pair-C1'pair":1, "cSS_CG_C4'-C1'-B1-B1pair":2, "cSS_CG_B1-B1pair-C1'pair-C4'pair":1, "cSS_CG_alpha_1":1, "cSS_CG_alpha_2":2, "cSS_CG_dB1":3, "cSS_CG_dB2":3,
"tSS_CG_tips_distance":5, "tSS_CG_C1'-B1-B1pair":2, "tSS_CG_B1-B1pair-C1'pair":2, "tSS_CG_C4'-C1'-B1-B1pair":1, "tSS_CG_B1-B1pair-C1'pair-C4'pair":2, "tSS_CG_alpha_1":1, "tSS_CG_alpha_2":2, "tSS_CG_dB1":1, "tSS_CG_dB2":2,
"cWW_CU_tips_distance":4, "cWW_CU_C1'-B1-B1pair":1, "cWW_CU_B1-B1pair-C1'pair":1, "cWW_CU_C4'-C1'-B1-B1pair":2, "cWW_CU_B1-B1pair-C1'pair-C4'pair":2, "cWW_CU_alpha_1":1, "cWW_CU_alpha_2":1, "cWW_CU_dB1":1, "cWW_CU_dB2":1,
"tWW_CU_tips_distance":1, "tWW_CU_C1'-B1-B1pair":2, "tWW_CU_B1-B1pair-C1'pair":2, "tWW_CU_C4'-C1'-B1-B1pair":2, "tWW_CU_B1-B1pair-C1'pair-C4'pair":2, "tWW_CU_alpha_1":1, "tWW_CU_alpha_2":2, "tWW_CU_dB1":2, "tWW_CU_dB2":1,
"cWH_CU_tips_distance":5, "cWH_CU_C1'-B1-B1pair":2, "cWH_CU_B1-B1pair-C1'pair":2, "cWH_CU_C4'-C1'-B1-B1pair":2, "cWH_CU_B1-B1pair-C1'pair-C4'pair":2, "cWH_CU_alpha_1":3, "cWH_CU_alpha_2":2, "cWH_CU_dB1":3, "cWH_CU_dB2":1,
"tWH_CU_tips_distance":1, "tWH_CU_C1'-B1-B1pair":2, "tWH_CU_B1-B1pair-C1'pair":2, "tWH_CU_C4'-C1'-B1-B1pair":3, "tWH_CU_B1-B1pair-C1'pair-C4'pair":2, "tWH_CU_alpha_1":3, "tWH_CU_alpha_2":3, "tWH_CU_dB1":5, "tWH_CU_dB2":2,
"cHW_CU_tips_distance":3, "cHW_CU_C1'-B1-B1pair":2, "cHW_CU_B1-B1pair-C1'pair":2, "cHW_CU_C4'-C1'-B1-B1pair":1, "cHW_CU_B1-B1pair-C1'pair-C4'pair":3, "cHW_CU_alpha_1":2, "cHW_CU_alpha_2":2, "cHW_CU_dB1":1, "cHW_CU_dB2":3,
"tHW_CU_tips_distance":8, "tHW_CU_C1'-B1-B1pair":1, "tHW_CU_B1-B1pair-C1'pair":1, "tHW_CU_C4'-C1'-B1-B1pair":3, "tHW_CU_B1-B1pair-C1'pair-C4'pair":2, "tHW_CU_alpha_1":1, "tHW_CU_alpha_2":2, "tHW_CU_dB1":3, "tHW_CU_dB2":3,
"cWS_CU_tips_distance":4, "cWS_CU_C1'-B1-B1pair":1, "cWS_CU_B1-B1pair-C1'pair":2, "cWS_CU_C4'-C1'-B1-B1pair":3, "cWS_CU_B1-B1pair-C1'pair-C4'pair":2, "cWS_CU_alpha_1":3, "cWS_CU_alpha_2":2, "cWS_CU_dB1":4, "cWS_CU_dB2":2,
"tWS_CU_tips_distance":5, "tWS_CU_C1'-B1-B1pair":3, "tWS_CU_B1-B1pair-C1'pair":1, "tWS_CU_C4'-C1'-B1-B1pair":2, "tWS_CU_B1-B1pair-C1'pair-C4'pair":2, "tWS_CU_alpha_1":2, "tWS_CU_alpha_2":1, "tWS_CU_dB1":3, "tWS_CU_dB2":5,
"cSW_CU_tips_distance":3, "cSW_CU_C1'-B1-B1pair":2, "cSW_CU_B1-B1pair-C1'pair":2, "cSW_CU_C4'-C1'-B1-B1pair":2, "cSW_CU_B1-B1pair-C1'pair-C4'pair":3, "cSW_CU_alpha_1":3, "cSW_CU_alpha_2":3, "cSW_CU_dB1":2, "cSW_CU_dB2":4,
"tSW_CU_tips_distance":7, "tSW_CU_C1'-B1-B1pair":2, "tSW_CU_B1-B1pair-C1'pair":2, "tSW_CU_C4'-C1'-B1-B1pair":2, "tSW_CU_B1-B1pair-C1'pair-C4'pair":2, "tSW_CU_alpha_1":2, "tSW_CU_alpha_2":2, "tSW_CU_dB1":2, "tSW_CU_dB2":2,
"cHH_CU_tips_distance":6, "cHH_CU_C1'-B1-B1pair":2, "cHH_CU_B1-B1pair-C1'pair":1, "cHH_CU_C4'-C1'-B1-B1pair":2, "cHH_CU_B1-B1pair-C1'pair-C4'pair":3, "cHH_CU_alpha_1":1, "cHH_CU_alpha_2":1, "cHH_CU_dB1":2, "cHH_CU_dB2":4,
"tHH_CU_tips_distance":5, "tHH_CU_C1'-B1-B1pair":3, "tHH_CU_B1-B1pair-C1'pair":2, "tHH_CU_C4'-C1'-B1-B1pair":2, "tHH_CU_B1-B1pair-C1'pair-C4'pair":1, "tHH_CU_alpha_1":2, "tHH_CU_alpha_2":2, "tHH_CU_dB1":2, "tHH_CU_dB2":2,
"cSH_CU_tips_distance":5, "cSH_CU_C1'-B1-B1pair":2, "cSH_CU_B1-B1pair-C1'pair":2, "cSH_CU_C4'-C1'-B1-B1pair":2, "cSH_CU_B1-B1pair-C1'pair-C4'pair":1, "cSH_CU_alpha_1":1, "cSH_CU_alpha_2":1, "cSH_CU_dB1":4, "cSH_CU_dB2":2,
"tSH_CU_tips_distance":5, "tSH_CU_C1'-B1-B1pair":2, "tSH_CU_B1-B1pair-C1'pair":3, "tSH_CU_C4'-C1'-B1-B1pair":2, "tSH_CU_B1-B1pair-C1'pair-C4'pair":2, "tSH_CU_alpha_1":3, "tSH_CU_alpha_2":3, "tSH_CU_dB1":4, "tSH_CU_dB2":2,
"cHS_CU_tips_distance":2, "cHS_CU_C1'-B1-B1pair":1, "cHS_CU_B1-B1pair-C1'pair":2, "cHS_CU_C4'-C1'-B1-B1pair":2, "cHS_CU_B1-B1pair-C1'pair-C4'pair":2, "cHS_CU_alpha_1":1, "cHS_CU_alpha_2":2, "cHS_CU_dB1":2, "cHS_CU_dB2":4,
"tHS_CU_tips_distance":8, "tHS_CU_C1'-B1-B1pair":2, "tHS_CU_B1-B1pair-C1'pair":1, "tHS_CU_C4'-C1'-B1-B1pair":2, "tHS_CU_B1-B1pair-C1'pair-C4'pair":2, "tHS_CU_alpha_1":2, "tHS_CU_alpha_2":2, "tHS_CU_dB1":3, "tHS_CU_dB2":4,
"cSS_CU_tips_distance":5, "cSS_CU_C1'-B1-B1pair":2, "cSS_CU_B1-B1pair-C1'pair":2, "cSS_CU_C4'-C1'-B1-B1pair":1, "cSS_CU_B1-B1pair-C1'pair-C4'pair":1, "cSS_CU_alpha_1":2, "cSS_CU_alpha_2":3, "cSS_CU_dB1":6, "cSS_CU_dB2":1,
"tSS_CU_tips_distance":5, "tSS_CU_C1'-B1-B1pair":2, "tSS_CU_B1-B1pair-C1'pair":3, "tSS_CU_C4'-C1'-B1-B1pair":2, "tSS_CU_B1-B1pair-C1'pair-C4'pair":2, "tSS_CU_alpha_1":3, "tSS_CU_alpha_2":3, "tSS_CU_dB1":7, "tSS_CU_dB2":2,
"cWW_GA_tips_distance":5, "cWW_GA_C1'-B1-B1pair":1, "cWW_GA_B1-B1pair-C1'pair":1, "cWW_GA_C4'-C1'-B1-B1pair":2, "cWW_GA_B1-B1pair-C1'pair-C4'pair":2, "cWW_GA_alpha_1":1, "cWW_GA_alpha_2":1, "cWW_GA_dB1":2, "cWW_GA_dB2":1,
"tWW_GA_tips_distance":6, "tWW_GA_C1'-B1-B1pair":1, "tWW_GA_B1-B1pair-C1'pair":1, "tWW_GA_C4'-C1'-B1-B1pair":1, "tWW_GA_B1-B1pair-C1'pair-C4'pair":2, "tWW_GA_alpha_1":2, "tWW_GA_alpha_2":2, "tWW_GA_dB1":1, "tWW_GA_dB2":2,
"cWH_GA_tips_distance":2, "cWH_GA_C1'-B1-B1pair":1, "cWH_GA_B1-B1pair-C1'pair":1, "cWH_GA_C4'-C1'-B1-B1pair":3, "cWH_GA_B1-B1pair-C1'pair-C4'pair":2, "cWH_GA_alpha_1":2, "cWH_GA_alpha_2":1, "cWH_GA_dB1":2, "cWH_GA_dB2":2,
"tWH_GA_tips_distance":7, "tWH_GA_C1'-B1-B1pair":1, "tWH_GA_B1-B1pair-C1'pair":2, "tWH_GA_C4'-C1'-B1-B1pair":1, "tWH_GA_B1-B1pair-C1'pair-C4'pair":2, "tWH_GA_alpha_1":2, "tWH_GA_alpha_2":2, "tWH_GA_dB1":1, "tWH_GA_dB2":6,
"cHW_GA_tips_distance":4, "cHW_GA_C1'-B1-B1pair":2, "cHW_GA_B1-B1pair-C1'pair":2, "cHW_GA_C4'-C1'-B1-B1pair":2, "cHW_GA_B1-B1pair-C1'pair-C4'pair":3, "cHW_GA_alpha_1":1, "cHW_GA_alpha_2":2, "cHW_GA_dB1":1, "cHW_GA_dB2":4,
"tHW_GA_tips_distance":3, "tHW_GA_C1'-B1-B1pair":2, "tHW_GA_B1-B1pair-C1'pair":1, "tHW_GA_C4'-C1'-B1-B1pair":2, "tHW_GA_B1-B1pair-C1'pair-C4'pair":2, "tHW_GA_alpha_1":1, "tHW_GA_alpha_2":2, "tHW_GA_dB1":3, "tHW_GA_dB2":1,
"cWS_GA_tips_distance":6, "cWS_GA_C1'-B1-B1pair":3, "cWS_GA_B1-B1pair-C1'pair":2, "cWS_GA_C4'-C1'-B1-B1pair":2, "cWS_GA_B1-B1pair-C1'pair-C4'pair":1, "cWS_GA_alpha_1":2, "cWS_GA_alpha_2":3, "cWS_GA_dB1":3, "cWS_GA_dB2":4,
"tWS_GA_tips_distance":5, "tWS_GA_C1'-B1-B1pair":3, "tWS_GA_B1-B1pair-C1'pair":2, "tWS_GA_C4'-C1'-B1-B1pair":1, "tWS_GA_B1-B1pair-C1'pair-C4'pair":1, "tWS_GA_alpha_1":2, "tWS_GA_alpha_2":2, "tWS_GA_dB1":2, "tWS_GA_dB2":5,
"cSW_GA_tips_distance":4, "cSW_GA_C1'-B1-B1pair":1, "cSW_GA_B1-B1pair-C1'pair":1, "cSW_GA_C4'-C1'-B1-B1pair":1, "cSW_GA_B1-B1pair-C1'pair-C4'pair":1, "cSW_GA_alpha_1":1, "cSW_GA_alpha_2":2, "cSW_GA_dB1":1, "cSW_GA_dB2":2,
"tSW_GA_tips_distance":2, "tSW_GA_C1'-B1-B1pair":1, "tSW_GA_B1-B1pair-C1'pair":2, "tSW_GA_C4'-C1'-B1-B1pair":1, "tSW_GA_B1-B1pair-C1'pair-C4'pair":2, "tSW_GA_alpha_1":1, "tSW_GA_alpha_2":3, "tSW_GA_dB1":2, "tSW_GA_dB2":2,
"cHH_GA_tips_distance":3, "cHH_GA_C1'-B1-B1pair":2, "cHH_GA_B1-B1pair-C1'pair":2, "cHH_GA_C4'-C1'-B1-B1pair":2, "cHH_GA_B1-B1pair-C1'pair-C4'pair":2, "cHH_GA_alpha_1":2, "cHH_GA_alpha_2":3, "cHH_GA_dB1":2, "cHH_GA_dB2":3,
"tHH_GA_tips_distance":3, "tHH_GA_C1'-B1-B1pair":3, "tHH_GA_B1-B1pair-C1'pair":2, "tHH_GA_C4'-C1'-B1-B1pair":2, "tHH_GA_B1-B1pair-C1'pair-C4'pair":2, "tHH_GA_alpha_1":1, "tHH_GA_alpha_2":2, "tHH_GA_dB1":3, "tHH_GA_dB2":2,
"cSH_GA_tips_distance":1, "cSH_GA_C1'-B1-B1pair":2, "cSH_GA_B1-B1pair-C1'pair":2, "cSH_GA_C4'-C1'-B1-B1pair":2, "cSH_GA_B1-B1pair-C1'pair-C4'pair":2, "cSH_GA_alpha_1":1, "cSH_GA_alpha_2":2, "cSH_GA_dB1":2, "cSH_GA_dB2":1,
"tSH_GA_tips_distance":3, "tSH_GA_C1'-B1-B1pair":1, "tSH_GA_B1-B1pair-C1'pair":1, "tSH_GA_C4'-C1'-B1-B1pair":2, "tSH_GA_B1-B1pair-C1'pair-C4'pair":2, "tSH_GA_alpha_1":2, "tSH_GA_alpha_2":2, "tSH_GA_dB1":2, "tSH_GA_dB2":7,
"cHS_GA_tips_distance":5, "cHS_GA_C1'-B1-B1pair":3, "cHS_GA_B1-B1pair-C1'pair":3, "cHS_GA_C4'-C1'-B1-B1pair":3, "cHS_GA_B1-B1pair-C1'pair-C4'pair":2, "cHS_GA_alpha_1":2, "cHS_GA_alpha_2":2, "cHS_GA_dB1":3, "cHS_GA_dB2":4,
"tHS_GA_tips_distance":5, "tHS_GA_C1'-B1-B1pair":3, "tHS_GA_B1-B1pair-C1'pair":1, "tHS_GA_C4'-C1'-B1-B1pair":3, "tHS_GA_B1-B1pair-C1'pair-C4'pair":2, "tHS_GA_alpha_1":2, "tHS_GA_alpha_2":1, "tHS_GA_dB1":1, "tHS_GA_dB2":2,
"cSS_GA_tips_distance":4, "cSS_GA_C1'-B1-B1pair":3, "cSS_GA_B1-B1pair-C1'pair":2, "cSS_GA_C4'-C1'-B1-B1pair":1, "cSS_GA_B1-B1pair-C1'pair-C4'pair":1, "cSS_GA_alpha_1":2, "cSS_GA_alpha_2":1, "cSS_GA_dB1":1, "cSS_GA_dB2":1,
"tSS_GA_tips_distance":4, "tSS_GA_C1'-B1-B1pair":1, "tSS_GA_B1-B1pair-C1'pair":1, "tSS_GA_C4'-C1'-B1-B1pair":1, "tSS_GA_B1-B1pair-C1'pair-C4'pair":1, "tSS_GA_alpha_1":1, "tSS_GA_alpha_2":2, "tSS_GA_dB1":5, "tSS_GA_dB2":2,
"cWW_GC_tips_distance":5, "cWW_GC_C1'-B1-B1pair":1, "cWW_GC_B1-B1pair-C1'pair":2, "cWW_GC_C4'-C1'-B1-B1pair":2, "cWW_GC_B1-B1pair-C1'pair-C4'pair":2, "cWW_GC_alpha_1":2, "cWW_GC_alpha_2":1, "cWW_GC_dB1":2, "cWW_GC_dB2":3,
"tWW_GC_tips_distance":3, "tWW_GC_C1'-B1-B1pair":1, "tWW_GC_B1-B1pair-C1'pair":2, "tWW_GC_C4'-C1'-B1-B1pair":2, "tWW_GC_B1-B1pair-C1'pair-C4'pair":2, "tWW_GC_alpha_1":1, "tWW_GC_alpha_2":2, "tWW_GC_dB1":3, "tWW_GC_dB2":4,
"cWH_GC_tips_distance":7, "cWH_GC_C1'-B1-B1pair":2, "cWH_GC_B1-B1pair-C1'pair":2, "cWH_GC_C4'-C1'-B1-B1pair":2, "cWH_GC_B1-B1pair-C1'pair-C4'pair":1, "cWH_GC_alpha_1":2, "cWH_GC_alpha_2":2, "cWH_GC_dB1":2, "cWH_GC_dB2":3,
"tWH_GC_tips_distance":5, "tWH_GC_C1'-B1-B1pair":1, "tWH_GC_B1-B1pair-C1'pair":1, "tWH_GC_C4'-C1'-B1-B1pair":2, "tWH_GC_B1-B1pair-C1'pair-C4'pair":2, "tWH_GC_alpha_1":3, "tWH_GC_alpha_2":3, "tWH_GC_dB1":2, "tWH_GC_dB2":2,
"cHW_GC_tips_distance":4, "cHW_GC_C1'-B1-B1pair":1, "cHW_GC_B1-B1pair-C1'pair":1, "cHW_GC_C4'-C1'-B1-B1pair":2, "cHW_GC_B1-B1pair-C1'pair-C4'pair":2, "cHW_GC_alpha_1":1, "cHW_GC_alpha_2":1, "cHW_GC_dB1":3, "cHW_GC_dB2":4,
"tHW_GC_tips_distance":5, "tHW_GC_C1'-B1-B1pair":2, "tHW_GC_B1-B1pair-C1'pair":2, "tHW_GC_C4'-C1'-B1-B1pair":2, "tHW_GC_B1-B1pair-C1'pair-C4'pair":2, "tHW_GC_alpha_1":2, "tHW_GC_alpha_2":2, "tHW_GC_dB1":2, "tHW_GC_dB2":4,
"cWS_GC_tips_distance":8, "cWS_GC_C1'-B1-B1pair":1, "cWS_GC_B1-B1pair-C1'pair":1, "cWS_GC_C4'-C1'-B1-B1pair":2, "cWS_GC_B1-B1pair-C1'pair-C4'pair":2, "cWS_GC_alpha_1":2, "cWS_GC_alpha_2":1, "cWS_GC_dB1":2, "cWS_GC_dB2":1,
"tWS_GC_tips_distance":2, "tWS_GC_C1'-B1-B1pair":1, "tWS_GC_B1-B1pair-C1'pair":1, "tWS_GC_C4'-C1'-B1-B1pair":3, "tWS_GC_B1-B1pair-C1'pair-C4'pair":2, "tWS_GC_alpha_1":2, "tWS_GC_alpha_2":1, "tWS_GC_dB1":4, "tWS_GC_dB2":5,
"cSW_GC_tips_distance":4, "cSW_GC_C1'-B1-B1pair":2, "cSW_GC_B1-B1pair-C1'pair":3, "cSW_GC_C4'-C1'-B1-B1pair":1, "cSW_GC_B1-B1pair-C1'pair-C4'pair":2, "cSW_GC_alpha_1":3, "cSW_GC_alpha_2":2, "cSW_GC_dB1":3, "cSW_GC_dB2":2,
"tSW_GC_tips_distance":2, "tSW_GC_C1'-B1-B1pair":1, "tSW_GC_B1-B1pair-C1'pair":3, "tSW_GC_C4'-C1'-B1-B1pair":1, "tSW_GC_B1-B1pair-C1'pair-C4'pair":2, "tSW_GC_alpha_1":2, "tSW_GC_alpha_2":2, "tSW_GC_dB1":4, "tSW_GC_dB2":2,
"cHH_GC_tips_distance":1, "cHH_GC_C1'-B1-B1pair":3, "cHH_GC_B1-B1pair-C1'pair":1, "cHH_GC_C4'-C1'-B1-B1pair":2, "cHH_GC_B1-B1pair-C1'pair-C4'pair":1, "cHH_GC_alpha_1":2, "cHH_GC_alpha_2":2, "cHH_GC_dB1":3, "cHH_GC_dB2":3,
"tHH_GC_tips_distance":8, "tHH_GC_C1'-B1-B1pair":2, "tHH_GC_B1-B1pair-C1'pair":1, "tHH_GC_C4'-C1'-B1-B1pair":2, "tHH_GC_B1-B1pair-C1'pair-C4'pair":2, "tHH_GC_alpha_1":3, "tHH_GC_alpha_2":1, "tHH_GC_dB1":6, "tHH_GC_dB2":3,
"cSH_GC_tips_distance":8, "cSH_GC_C1'-B1-B1pair":2, "cSH_GC_B1-B1pair-C1'pair":3, "cSH_GC_C4'-C1'-B1-B1pair":1, "cSH_GC_B1-B1pair-C1'pair-C4'pair":3, "cSH_GC_alpha_1":2, "cSH_GC_alpha_2":2, "cSH_GC_dB1":5, "cSH_GC_dB2":4,
"tSH_GC_tips_distance":4, "tSH_GC_C1'-B1-B1pair":1, "tSH_GC_B1-B1pair-C1'pair":2, "tSH_GC_C4'-C1'-B1-B1pair":1, "tSH_GC_B1-B1pair-C1'pair-C4'pair":4, "tSH_GC_alpha_1":1, "tSH_GC_alpha_2":2, "tSH_GC_dB1":2, "tSH_GC_dB2":3,
"cHS_GC_tips_distance":5, "cHS_GC_C1'-B1-B1pair":2, "cHS_GC_B1-B1pair-C1'pair":2, "cHS_GC_C4'-C1'-B1-B1pair":2, "cHS_GC_B1-B1pair-C1'pair-C4'pair":2, "cHS_GC_alpha_1":3, "cHS_GC_alpha_2":1, "cHS_GC_dB1":2, "cHS_GC_dB2":5,
"tHS_GC_tips_distance":5, "tHS_GC_C1'-B1-B1pair":2, "tHS_GC_B1-B1pair-C1'pair":2, "tHS_GC_C4'-C1'-B1-B1pair":2, "tHS_GC_B1-B1pair-C1'pair-C4'pair":3, "tHS_GC_alpha_1":2, "tHS_GC_alpha_2":2, "tHS_GC_dB1":2, "tHS_GC_dB2":2,
"cSS_GC_tips_distance":2, "cSS_GC_C1'-B1-B1pair":2, "cSS_GC_B1-B1pair-C1'pair":2, "cSS_GC_C4'-C1'-B1-B1pair":1, "cSS_GC_B1-B1pair-C1'pair-C4'pair":1, "cSS_GC_alpha_1":2, "cSS_GC_alpha_2":3, "cSS_GC_dB1":3, "cSS_GC_dB2":3,
"tSS_GC_tips_distance":5, "tSS_GC_C1'-B1-B1pair":2, "tSS_GC_B1-B1pair-C1'pair":2, "tSS_GC_C4'-C1'-B1-B1pair":1, "tSS_GC_B1-B1pair-C1'pair-C4'pair":2, "tSS_GC_alpha_1":2, "tSS_GC_alpha_2":3, "tSS_GC_dB1":2, "tSS_GC_dB2":1,
"cWW_GG_tips_distance":3, "cWW_GG_C1'-B1-B1pair":1, "cWW_GG_B1-B1pair-C1'pair":1, "cWW_GG_C4'-C1'-B1-B1pair":2, "cWW_GG_B1-B1pair-C1'pair-C4'pair":1, "cWW_GG_alpha_1":1, "cWW_GG_alpha_2":2, "cWW_GG_dB1":2, "cWW_GG_dB2":2,
"tWW_GG_tips_distance":4, "tWW_GG_C1'-B1-B1pair":1, "tWW_GG_B1-B1pair-C1'pair":1, "tWW_GG_C4'-C1'-B1-B1pair":2, "tWW_GG_B1-B1pair-C1'pair-C4'pair":2, "tWW_GG_alpha_1":2, "tWW_GG_alpha_2":2, "tWW_GG_dB1":1, "tWW_GG_dB2":2,
"cWH_GG_tips_distance":2, "cWH_GG_C1'-B1-B1pair":2, "cWH_GG_B1-B1pair-C1'pair":2, "cWH_GG_C4'-C1'-B1-B1pair":2, "cWH_GG_B1-B1pair-C1'pair-C4'pair":2, "cWH_GG_alpha_1":2, "cWH_GG_alpha_2":2, "cWH_GG_dB1":4, "cWH_GG_dB2":3,
"tWH_GG_tips_distance":2, "tWH_GG_C1'-B1-B1pair":1, "tWH_GG_B1-B1pair-C1'pair":2, "tWH_GG_C4'-C1'-B1-B1pair":2, "tWH_GG_B1-B1pair-C1'pair-C4'pair":2, "tWH_GG_alpha_1":2, "tWH_GG_alpha_2":2, "tWH_GG_dB1":2, "tWH_GG_dB2":3,
"cHW_GG_tips_distance":3, "cHW_GG_C1'-B1-B1pair":2, "cHW_GG_B1-B1pair-C1'pair":2, "cHW_GG_C4'-C1'-B1-B1pair":2, "cHW_GG_B1-B1pair-C1'pair-C4'pair":2, "cHW_GG_alpha_1":1, "cHW_GG_alpha_2":1, "cHW_GG_dB1":2, "cHW_GG_dB2":2,
"tHW_GG_tips_distance":4, "tHW_GG_C1'-B1-B1pair":2, "tHW_GG_B1-B1pair-C1'pair":2, "tHW_GG_C4'-C1'-B1-B1pair":1, "tHW_GG_B1-B1pair-C1'pair-C4'pair":2, "tHW_GG_alpha_1":2, "tHW_GG_alpha_2":2, "tHW_GG_dB1":1, "tHW_GG_dB2":4,
"cWS_GG_tips_distance":2, "cWS_GG_C1'-B1-B1pair":1, "cWS_GG_B1-B1pair-C1'pair":1, "cWS_GG_C4'-C1'-B1-B1pair":2, "cWS_GG_B1-B1pair-C1'pair-C4'pair":1, "cWS_GG_alpha_1":2, "cWS_GG_alpha_2":2, "cWS_GG_dB1":4, "cWS_GG_dB2":3,
"tWS_GG_tips_distance":8, "tWS_GG_C1'-B1-B1pair":3, "tWS_GG_B1-B1pair-C1'pair":2, "tWS_GG_C4'-C1'-B1-B1pair":3, "tWS_GG_B1-B1pair-C1'pair-C4'pair":2, "tWS_GG_alpha_1":1, "tWS_GG_alpha_2":1, "tWS_GG_dB1":1, "tWS_GG_dB2":3,
"cSW_GG_tips_distance":1, "cSW_GG_C1'-B1-B1pair":1, "cSW_GG_B1-B1pair-C1'pair":1, "cSW_GG_C4'-C1'-B1-B1pair":1, "cSW_GG_B1-B1pair-C1'pair-C4'pair":2, "cSW_GG_alpha_1":2, "cSW_GG_alpha_2":2, "cSW_GG_dB1":2, "cSW_GG_dB2":2,
"tSW_GG_tips_distance":5, "tSW_GG_C1'-B1-B1pair":3, "tSW_GG_B1-B1pair-C1'pair":2, "tSW_GG_C4'-C1'-B1-B1pair":3, "tSW_GG_B1-B1pair-C1'pair-C4'pair":2, "tSW_GG_alpha_1":1, "tSW_GG_alpha_2":3, "tSW_GG_dB1":2, "tSW_GG_dB2":1,
"cHH_GG_tips_distance":4, "cHH_GG_C1'-B1-B1pair":1, "cHH_GG_B1-B1pair-C1'pair":1, "cHH_GG_C4'-C1'-B1-B1pair":2, "cHH_GG_B1-B1pair-C1'pair-C4'pair":3, "cHH_GG_alpha_1":1, "cHH_GG_alpha_2":2, "cHH_GG_dB1":2, "cHH_GG_dB2":3,
"tHH_GG_tips_distance":8, "tHH_GG_C1'-B1-B1pair":2, "tHH_GG_B1-B1pair-C1'pair":2, "tHH_GG_C4'-C1'-B1-B1pair":2, "tHH_GG_B1-B1pair-C1'pair-C4'pair":3, "tHH_GG_alpha_1":2, "tHH_GG_alpha_2":2, "tHH_GG_dB1":2, "tHH_GG_dB2":3,
"cSH_GG_tips_distance":2, "cSH_GG_C1'-B1-B1pair":2, "cSH_GG_B1-B1pair-C1'pair":1, "cSH_GG_C4'-C1'-B1-B1pair":1, "cSH_GG_B1-B1pair-C1'pair-C4'pair":2, "cSH_GG_alpha_1":2, "cSH_GG_alpha_2":1, "cSH_GG_dB1":1, "cSH_GG_dB2":1,
"tSH_GG_tips_distance":2, "tSH_GG_C1'-B1-B1pair":2, "tSH_GG_B1-B1pair-C1'pair":2, "tSH_GG_C4'-C1'-B1-B1pair":2, "tSH_GG_B1-B1pair-C1'pair-C4'pair":2, "tSH_GG_alpha_1":2, "tSH_GG_alpha_2":2, "tSH_GG_dB1":1, "tSH_GG_dB2":2,
"cHS_GG_tips_distance":2, "cHS_GG_C1'-B1-B1pair":1, "cHS_GG_B1-B1pair-C1'pair":2, "cHS_GG_C4'-C1'-B1-B1pair":2, "cHS_GG_B1-B1pair-C1'pair-C4'pair":1, "cHS_GG_alpha_1":1, "cHS_GG_alpha_2":2, "cHS_GG_dB1":1, "cHS_GG_dB2":2,
"tHS_GG_tips_distance":2, "tHS_GG_C1'-B1-B1pair":2, "tHS_GG_B1-B1pair-C1'pair":2, "tHS_GG_C4'-C1'-B1-B1pair":2, "tHS_GG_B1-B1pair-C1'pair-C4'pair":1, "tHS_GG_alpha_1":2, "tHS_GG_alpha_2":3, "tHS_GG_dB1":2, "tHS_GG_dB2":1,
"cSS_GG_tips_distance":2, "cSS_GG_C1'-B1-B1pair":2, "cSS_GG_B1-B1pair-C1'pair":2, "cSS_GG_C4'-C1'-B1-B1pair":1, "cSS_GG_B1-B1pair-C1'pair-C4'pair":1, "cSS_GG_alpha_1":2, "cSS_GG_alpha_2":3, "cSS_GG_dB1":3, "cSS_GG_dB2":5,
"tSS_GG_tips_distance":2, "tSS_GG_C1'-B1-B1pair":3, "tSS_GG_B1-B1pair-C1'pair":2, "tSS_GG_C4'-C1'-B1-B1pair":2, "tSS_GG_B1-B1pair-C1'pair-C4'pair":1, "tSS_GG_alpha_1":1, "tSS_GG_alpha_2":3, "tSS_GG_dB1":3, "tSS_GG_dB2":2,
"cWW_GU_tips_distance":2, "cWW_GU_C1'-B1-B1pair":2, "cWW_GU_B1-B1pair-C1'pair":2, "cWW_GU_C4'-C1'-B1-B1pair":2, "cWW_GU_B1-B1pair-C1'pair-C4'pair":1, "cWW_GU_alpha_1":3, "cWW_GU_alpha_2":2, "cWW_GU_dB1":4, "cWW_GU_dB2":3,
"tWW_GU_tips_distance":2, "tWW_GU_C1'-B1-B1pair":3, "tWW_GU_B1-B1pair-C1'pair":2, "tWW_GU_C4'-C1'-B1-B1pair":2, "tWW_GU_B1-B1pair-C1'pair-C4'pair":3, "tWW_GU_alpha_1":2, "tWW_GU_alpha_2":2, "tWW_GU_dB1":3, "tWW_GU_dB2":3,
"cWH_GU_tips_distance":2, "cWH_GU_C1'-B1-B1pair":1, "cWH_GU_B1-B1pair-C1'pair":2, "cWH_GU_C4'-C1'-B1-B1pair":1, "cWH_GU_B1-B1pair-C1'pair-C4'pair":2, "cWH_GU_alpha_1":2, "cWH_GU_alpha_2":4, "cWH_GU_dB1":3, "cWH_GU_dB2":1,
"tWH_GU_tips_distance":8, "tWH_GU_C1'-B1-B1pair":1, "tWH_GU_B1-B1pair-C1'pair":2, "tWH_GU_C4'-C1'-B1-B1pair":2, "tWH_GU_B1-B1pair-C1'pair-C4'pair":2, "tWH_GU_alpha_1":2, "tWH_GU_alpha_2":2, "tWH_GU_dB1":3, "tWH_GU_dB2":1,
"cHW_GU_tips_distance":4, "cHW_GU_C1'-B1-B1pair":2, "cHW_GU_B1-B1pair-C1'pair":1, "cHW_GU_C4'-C1'-B1-B1pair":2, "cHW_GU_B1-B1pair-C1'pair-C4'pair":2, "cHW_GU_alpha_1":2, "cHW_GU_alpha_2":2, "cHW_GU_dB1":3, "cHW_GU_dB2":3,
"tHW_GU_tips_distance":1, "tHW_GU_C1'-B1-B1pair":3, "tHW_GU_B1-B1pair-C1'pair":1, "tHW_GU_C4'-C1'-B1-B1pair":2, "tHW_GU_B1-B1pair-C1'pair-C4'pair":3, "tHW_GU_alpha_1":3, "tHW_GU_alpha_2":1, "tHW_GU_dB1":2, "tHW_GU_dB2":5,
"cWS_GU_tips_distance":2, "cWS_GU_C1'-B1-B1pair":1, "cWS_GU_B1-B1pair-C1'pair":1, "cWS_GU_C4'-C1'-B1-B1pair":1, "cWS_GU_B1-B1pair-C1'pair-C4'pair":2, "cWS_GU_alpha_1":3, "cWS_GU_alpha_2":3, "cWS_GU_dB1":2, "cWS_GU_dB2":3,
"tWS_GU_tips_distance":4, "tWS_GU_C1'-B1-B1pair":3, "tWS_GU_B1-B1pair-C1'pair":1, "tWS_GU_C4'-C1'-B1-B1pair":3, "tWS_GU_B1-B1pair-C1'pair-C4'pair":2, "tWS_GU_alpha_1":1, "tWS_GU_alpha_2":2, "tWS_GU_dB1":3, "tWS_GU_dB2":3,
"cSW_GU_tips_distance":2, "cSW_GU_C1'-B1-B1pair":2, "cSW_GU_B1-B1pair-C1'pair":2, "cSW_GU_C4'-C1'-B1-B1pair":2, "cSW_GU_B1-B1pair-C1'pair-C4'pair":2, "cSW_GU_alpha_1":1, "cSW_GU_alpha_2":2, "cSW_GU_dB1":3, "cSW_GU_dB2":2,
"tSW_GU_tips_distance":3, "tSW_GU_C1'-B1-B1pair":1, "tSW_GU_B1-B1pair-C1'pair":2, "tSW_GU_C4'-C1'-B1-B1pair":2, "tSW_GU_B1-B1pair-C1'pair-C4'pair":2, "tSW_GU_alpha_1":1, "tSW_GU_alpha_2":2, "tSW_GU_dB1":5, "tSW_GU_dB2":1,
"cHH_GU_tips_distance":5, "cHH_GU_C1'-B1-B1pair":2, "cHH_GU_B1-B1pair-C1'pair":3, "cHH_GU_C4'-C1'-B1-B1pair":2, "cHH_GU_B1-B1pair-C1'pair-C4'pair":2, "cHH_GU_alpha_1":2, "cHH_GU_alpha_2":2, "cHH_GU_dB1":5, "cHH_GU_dB2":3,
"tHH_GU_tips_distance":5, "tHH_GU_C1'-B1-B1pair":2, "tHH_GU_B1-B1pair-C1'pair":1, "tHH_GU_C4'-C1'-B1-B1pair":1, "tHH_GU_B1-B1pair-C1'pair-C4'pair":2, "tHH_GU_alpha_1":2, "tHH_GU_alpha_2":1, "tHH_GU_dB1":8, "tHH_GU_dB2":2,
"cSH_GU_tips_distance":3, "cSH_GU_C1'-B1-B1pair":1, "cSH_GU_B1-B1pair-C1'pair":2, "cSH_GU_C4'-C1'-B1-B1pair":3, "cSH_GU_B1-B1pair-C1'pair-C4'pair":2, "cSH_GU_alpha_1":2, "cSH_GU_alpha_2":1, "cSH_GU_dB1":2, "cSH_GU_dB2":2,
"tSH_GU_tips_distance":2, "tSH_GU_C1'-B1-B1pair":2, "tSH_GU_B1-B1pair-C1'pair":2, "tSH_GU_C4'-C1'-B1-B1pair":1, "tSH_GU_B1-B1pair-C1'pair-C4'pair":1, "tSH_GU_alpha_1":2, "tSH_GU_alpha_2":3, "tSH_GU_dB1":3, "tSH_GU_dB2":3,
"cHS_GU_tips_distance":8, "cHS_GU_C1'-B1-B1pair":1, "cHS_GU_B1-B1pair-C1'pair":1, "cHS_GU_C4'-C1'-B1-B1pair":2, "cHS_GU_B1-B1pair-C1'pair-C4'pair":2, "cHS_GU_alpha_1":1, "cHS_GU_alpha_2":1, "cHS_GU_dB1":4, "cHS_GU_dB2":3,
"tHS_GU_tips_distance":5, "tHS_GU_C1'-B1-B1pair":4, "tHS_GU_B1-B1pair-C1'pair":2, "tHS_GU_C4'-C1'-B1-B1pair":2, "tHS_GU_B1-B1pair-C1'pair-C4'pair":1, "tHS_GU_alpha_1":2, "tHS_GU_alpha_2":1, "tHS_GU_dB1":1, "tHS_GU_dB2":3,
"cSS_GU_tips_distance":2, "cSS_GU_C1'-B1-B1pair":3, "cSS_GU_B1-B1pair-C1'pair":2, "cSS_GU_C4'-C1'-B1-B1pair":2, "cSS_GU_B1-B1pair-C1'pair-C4'pair":2, "cSS_GU_alpha_1":2, "cSS_GU_alpha_2":1, "cSS_GU_dB1":3, "cSS_GU_dB2":4,
"tSS_GU_tips_distance":5, "tSS_GU_C1'-B1-B1pair":2, "tSS_GU_B1-B1pair-C1'pair":2, "tSS_GU_C4'-C1'-B1-B1pair":1, "tSS_GU_B1-B1pair-C1'pair-C4'pair":3, "tSS_GU_alpha_1":2, "tSS_GU_alpha_2":2, "tSS_GU_dB1":2, "tSS_GU_dB2":6,
"cWW_UA_tips_distance":4, "cWW_UA_C1'-B1-B1pair":2, "cWW_UA_B1-B1pair-C1'pair":2, "cWW_UA_C4'-C1'-B1-B1pair":1, "cWW_UA_B1-B1pair-C1'pair-C4'pair":2, "cWW_UA_alpha_1":2, "cWW_UA_alpha_2":2, "cWW_UA_dB1":2, "cWW_UA_dB2":7,
"tWW_UA_tips_distance":2, "tWW_UA_C1'-B1-B1pair":1, "tWW_UA_B1-B1pair-C1'pair":2, "tWW_UA_C4'-C1'-B1-B1pair":2, "tWW_UA_B1-B1pair-C1'pair-C4'pair":1, "tWW_UA_alpha_1":2, "tWW_UA_alpha_2":1, "tWW_UA_dB1":6, "tWW_UA_dB2":1,
"cWH_UA_tips_distance":3, "cWH_UA_C1'-B1-B1pair":3, "cWH_UA_B1-B1pair-C1'pair":3, "cWH_UA_C4'-C1'-B1-B1pair":3, "cWH_UA_B1-B1pair-C1'pair-C4'pair":2, "cWH_UA_alpha_1":2, "cWH_UA_alpha_2":3, "cWH_UA_dB1":4, "cWH_UA_dB2":3,
"tWH_UA_tips_distance":3, "tWH_UA_C1'-B1-B1pair":2, "tWH_UA_B1-B1pair-C1'pair":1, "tWH_UA_C4'-C1'-B1-B1pair":2, "tWH_UA_B1-B1pair-C1'pair-C4'pair":2, "tWH_UA_alpha_1":1, "tWH_UA_alpha_2":2, "tWH_UA_dB1":3, "tWH_UA_dB2":2,
"cHW_UA_tips_distance":5, "cHW_UA_C1'-B1-B1pair":1, "cHW_UA_B1-B1pair-C1'pair":1, "cHW_UA_C4'-C1'-B1-B1pair":3, "cHW_UA_B1-B1pair-C1'pair-C4'pair":1, "cHW_UA_alpha_1":1, "cHW_UA_alpha_2":1, "cHW_UA_dB1":3, "cHW_UA_dB2":1,
"tHW_UA_tips_distance":7, "tHW_UA_C1'-B1-B1pair":3, "tHW_UA_B1-B1pair-C1'pair":2, "tHW_UA_C4'-C1'-B1-B1pair":1, "tHW_UA_B1-B1pair-C1'pair-C4'pair":2, "tHW_UA_alpha_1":3, "tHW_UA_alpha_2":3, "tHW_UA_dB1":2, "tHW_UA_dB2":1,
"cWS_UA_tips_distance":1, "cWS_UA_C1'-B1-B1pair":2, "cWS_UA_B1-B1pair-C1'pair":3, "cWS_UA_C4'-C1'-B1-B1pair":2, "cWS_UA_B1-B1pair-C1'pair-C4'pair":1, "cWS_UA_alpha_1":2, "cWS_UA_alpha_2":2, "cWS_UA_dB1":3, "cWS_UA_dB2":4,
"tWS_UA_tips_distance":5, "tWS_UA_C1'-B1-B1pair":1, "tWS_UA_B1-B1pair-C1'pair":2, "tWS_UA_C4'-C1'-B1-B1pair":2, "tWS_UA_B1-B1pair-C1'pair-C4'pair":1, "tWS_UA_alpha_1":1, "tWS_UA_alpha_2":3, "tWS_UA_dB1":1, "tWS_UA_dB2":1,
"cSW_UA_tips_distance":2, "cSW_UA_C1'-B1-B1pair":1, "cSW_UA_B1-B1pair-C1'pair":1, "cSW_UA_C4'-C1'-B1-B1pair":2, "cSW_UA_B1-B1pair-C1'pair-C4'pair":2, "cSW_UA_alpha_1":2, "cSW_UA_alpha_2":3, "cSW_UA_dB1":3, "cSW_UA_dB2":3,
"tSW_UA_tips_distance":2, "tSW_UA_C1'-B1-B1pair":1, "tSW_UA_B1-B1pair-C1'pair":2, "tSW_UA_C4'-C1'-B1-B1pair":1, "tSW_UA_B1-B1pair-C1'pair-C4'pair":1, "tSW_UA_alpha_1":2, "tSW_UA_alpha_2":2, "tSW_UA_dB1":3, "tSW_UA_dB2":2,
"cHH_UA_tips_distance":4, "cHH_UA_C1'-B1-B1pair":1, "cHH_UA_B1-B1pair-C1'pair":1, "cHH_UA_C4'-C1'-B1-B1pair":1, "cHH_UA_B1-B1pair-C1'pair-C4'pair":2, "cHH_UA_alpha_1":2, "cHH_UA_alpha_2":2, "cHH_UA_dB1":5, "cHH_UA_dB2":2,
"tHH_UA_tips_distance":4, "tHH_UA_C1'-B1-B1pair":2, "tHH_UA_B1-B1pair-C1'pair":2, "tHH_UA_C4'-C1'-B1-B1pair":2, "tHH_UA_B1-B1pair-C1'pair-C4'pair":2, "tHH_UA_alpha_1":2, "tHH_UA_alpha_2":3, "tHH_UA_dB1":3, "tHH_UA_dB2":1,
"cSH_UA_tips_distance":4, "cSH_UA_C1'-B1-B1pair":1, "cSH_UA_B1-B1pair-C1'pair":1, "cSH_UA_C4'-C1'-B1-B1pair":2, "cSH_UA_B1-B1pair-C1'pair-C4'pair":2, "cSH_UA_alpha_1":2, "cSH_UA_alpha_2":2, "cSH_UA_dB1":3, "cSH_UA_dB2":2,
"tSH_UA_tips_distance":2, "tSH_UA_C1'-B1-B1pair":2, "tSH_UA_B1-B1pair-C1'pair":2, "tSH_UA_C4'-C1'-B1-B1pair":3, "tSH_UA_B1-B1pair-C1'pair-C4'pair":2, "tSH_UA_alpha_1":3, "tSH_UA_alpha_2":2, "tSH_UA_dB1":4, "tSH_UA_dB2":1,
"cHS_UA_tips_distance":5, "cHS_UA_C1'-B1-B1pair":2, "cHS_UA_B1-B1pair-C1'pair":2, "cHS_UA_C4'-C1'-B1-B1pair":2, "cHS_UA_B1-B1pair-C1'pair-C4'pair":2, "cHS_UA_alpha_1":2, "cHS_UA_alpha_2":2, "cHS_UA_dB1":1, "cHS_UA_dB2":3,
"tHS_UA_tips_distance":5, "tHS_UA_C1'-B1-B1pair":2, "tHS_UA_B1-B1pair-C1'pair":2, "tHS_UA_C4'-C1'-B1-B1pair":3, "tHS_UA_B1-B1pair-C1'pair-C4'pair":1, "tHS_UA_alpha_1":3, "tHS_UA_alpha_2":3, "tHS_UA_dB1":2, "tHS_UA_dB2":7,
"cSS_UA_tips_distance":2, "cSS_UA_C1'-B1-B1pair":2, "cSS_UA_B1-B1pair-C1'pair":2, "cSS_UA_C4'-C1'-B1-B1pair":2, "cSS_UA_B1-B1pair-C1'pair-C4'pair":1, "cSS_UA_alpha_1":1, "cSS_UA_alpha_2":1, "cSS_UA_dB1":2, "cSS_UA_dB2":1,
"tSS_UA_tips_distance":5, "tSS_UA_C1'-B1-B1pair":1, "tSS_UA_B1-B1pair-C1'pair":3, "tSS_UA_C4'-C1'-B1-B1pair":2, "tSS_UA_B1-B1pair-C1'pair-C4'pair":3, "tSS_UA_alpha_1":2, "tSS_UA_alpha_2":2, "tSS_UA_dB1":4, "tSS_UA_dB2":4,
"cWW_UC_tips_distance":3, "cWW_UC_C1'-B1-B1pair":1, "cWW_UC_B1-B1pair-C1'pair":2, "cWW_UC_C4'-C1'-B1-B1pair":2, "cWW_UC_B1-B1pair-C1'pair-C4'pair":2, "cWW_UC_alpha_1":2, "cWW_UC_alpha_2":1, "cWW_UC_dB1":1, "cWW_UC_dB2":2,
"tWW_UC_tips_distance":4, "tWW_UC_C1'-B1-B1pair":2, "tWW_UC_B1-B1pair-C1'pair":2, "tWW_UC_C4'-C1'-B1-B1pair":2, "tWW_UC_B1-B1pair-C1'pair-C4'pair":2, "tWW_UC_alpha_1":3, "tWW_UC_alpha_2":1, "tWW_UC_dB1":1, "tWW_UC_dB2":4,
"cWH_UC_tips_distance":2, "cWH_UC_C1'-B1-B1pair":2, "cWH_UC_B1-B1pair-C1'pair":2, "cWH_UC_C4'-C1'-B1-B1pair":2, "cWH_UC_B1-B1pair-C1'pair-C4'pair":4, "cWH_UC_alpha_1":2, "cWH_UC_alpha_2":3, "cWH_UC_dB1":3, "cWH_UC_dB2":3,
"tWH_UC_tips_distance":4, "tWH_UC_C1'-B1-B1pair":3, "tWH_UC_B1-B1pair-C1'pair":2, "tWH_UC_C4'-C1'-B1-B1pair":3, "tWH_UC_B1-B1pair-C1'pair-C4'pair":1, "tWH_UC_alpha_1":4, "tWH_UC_alpha_2":1, "tWH_UC_dB1":4, "tWH_UC_dB2":2,
"cHW_UC_tips_distance":5, "cHW_UC_C1'-B1-B1pair":2, "cHW_UC_B1-B1pair-C1'pair":2, "cHW_UC_C4'-C1'-B1-B1pair":1, "cHW_UC_B1-B1pair-C1'pair-C4'pair":2, "cHW_UC_alpha_1":2, "cHW_UC_alpha_2":2, "cHW_UC_dB1":2, "cHW_UC_dB2":6,
"tHW_UC_tips_distance":2, "tHW_UC_C1'-B1-B1pair":2, "tHW_UC_B1-B1pair-C1'pair":2, "tHW_UC_C4'-C1'-B1-B1pair":3, "tHW_UC_B1-B1pair-C1'pair-C4'pair":2, "tHW_UC_alpha_1":2, "tHW_UC_alpha_2":4, "tHW_UC_dB1":4, "tHW_UC_dB2":4,
"cWS_UC_tips_distance":4, "cWS_UC_C1'-B1-B1pair":2, "cWS_UC_B1-B1pair-C1'pair":2, "cWS_UC_C4'-C1'-B1-B1pair":2, "cWS_UC_B1-B1pair-C1'pair-C4'pair":2, "cWS_UC_alpha_1":3, "cWS_UC_alpha_2":2, "cWS_UC_dB1":3, "cWS_UC_dB2":2,
"tWS_UC_tips_distance":4, "tWS_UC_C1'-B1-B1pair":2, "tWS_UC_B1-B1pair-C1'pair":1, "tWS_UC_C4'-C1'-B1-B1pair":2, "tWS_UC_B1-B1pair-C1'pair-C4'pair":2, "tWS_UC_alpha_1":2, "tWS_UC_alpha_2":1, "tWS_UC_dB1":3, "tWS_UC_dB2":2,
"cSW_UC_tips_distance":4, "cSW_UC_C1'-B1-B1pair":1, "cSW_UC_B1-B1pair-C1'pair":2, "cSW_UC_C4'-C1'-B1-B1pair":2, "cSW_UC_B1-B1pair-C1'pair-C4'pair":2, "cSW_UC_alpha_1":2, "cSW_UC_alpha_2":3, "cSW_UC_dB1":3, "cSW_UC_dB2":6,
"tSW_UC_tips_distance":5, "tSW_UC_C1'-B1-B1pair":1, "tSW_UC_B1-B1pair-C1'pair":2, "tSW_UC_C4'-C1'-B1-B1pair":3, "tSW_UC_B1-B1pair-C1'pair-C4'pair":1, "tSW_UC_alpha_1":2, "tSW_UC_alpha_2":2, "tSW_UC_dB1":2, "tSW_UC_dB2":1,
"cHH_UC_tips_distance":5, "cHH_UC_C1'-B1-B1pair":2, "cHH_UC_B1-B1pair-C1'pair":1, "cHH_UC_C4'-C1'-B1-B1pair":2, "cHH_UC_B1-B1pair-C1'pair-C4'pair":2, "cHH_UC_alpha_1":1, "cHH_UC_alpha_2":3, "cHH_UC_dB1":7, "cHH_UC_dB2":3,
"tHH_UC_tips_distance":5, "tHH_UC_C1'-B1-B1pair":1, "tHH_UC_B1-B1pair-C1'pair":1, "tHH_UC_C4'-C1'-B1-B1pair":2, "tHH_UC_B1-B1pair-C1'pair-C4'pair":3, "tHH_UC_alpha_1":2, "tHH_UC_alpha_2":2, "tHH_UC_dB1":8, "tHH_UC_dB2":8,
"cSH_UC_tips_distance":5, "cSH_UC_C1'-B1-B1pair":2, "cSH_UC_B1-B1pair-C1'pair":2, "cSH_UC_C4'-C1'-B1-B1pair":2, "cSH_UC_B1-B1pair-C1'pair-C4'pair":1, "cSH_UC_alpha_1":2, "cSH_UC_alpha_2":3, "cSH_UC_dB1":5, "cSH_UC_dB2":3,
"tSH_UC_tips_distance":2, "tSH_UC_C1'-B1-B1pair":1, "tSH_UC_B1-B1pair-C1'pair":1, "tSH_UC_C4'-C1'-B1-B1pair":2, "tSH_UC_B1-B1pair-C1'pair-C4'pair":1, "tSH_UC_alpha_1":2, "tSH_UC_alpha_2":2, "tSH_UC_dB1":2, "tSH_UC_dB2":7,
"cHS_UC_tips_distance":5, "cHS_UC_C1'-B1-B1pair":2, "cHS_UC_B1-B1pair-C1'pair":2, "cHS_UC_C4'-C1'-B1-B1pair":1, "cHS_UC_B1-B1pair-C1'pair-C4'pair":3, "cHS_UC_alpha_1":3, "cHS_UC_alpha_2":2, "cHS_UC_dB1":6, "cHS_UC_dB2":7,
"tHS_UC_tips_distance":5, "tHS_UC_C1'-B1-B1pair":3, "tHS_UC_B1-B1pair-C1'pair":2, "tHS_UC_C4'-C1'-B1-B1pair":2, "tHS_UC_B1-B1pair-C1'pair-C4'pair":3, "tHS_UC_alpha_1":3, "tHS_UC_alpha_2":1, "tHS_UC_dB1":5, "tHS_UC_dB2":7,
"cSS_UC_tips_distance":5, "cSS_UC_C1'-B1-B1pair":2, "cSS_UC_B1-B1pair-C1'pair":1, "cSS_UC_C4'-C1'-B1-B1pair":3, "cSS_UC_B1-B1pair-C1'pair-C4'pair":1, "cSS_UC_alpha_1":3, "cSS_UC_alpha_2":3, "cSS_UC_dB1":8, "cSS_UC_dB2":5,
"tSS_UC_tips_distance":5, "tSS_UC_C1'-B1-B1pair":2, "tSS_UC_B1-B1pair-C1'pair":1, "tSS_UC_C4'-C1'-B1-B1pair":3, "tSS_UC_B1-B1pair-C1'pair-C4'pair":3, "tSS_UC_alpha_1":3, "tSS_UC_alpha_2":1, "tSS_UC_dB1":8, "tSS_UC_dB2":7,
"cWW_UG_tips_distance":3, "cWW_UG_C1'-B1-B1pair":2, "cWW_UG_B1-B1pair-C1'pair":3, "cWW_UG_C4'-C1'-B1-B1pair":2, "cWW_UG_B1-B1pair-C1'pair-C4'pair":2, "cWW_UG_alpha_1":2, "cWW_UG_alpha_2":3, "cWW_UG_dB1":4, "cWW_UG_dB2":3,
"tWW_UG_tips_distance":2, "tWW_UG_C1'-B1-B1pair":1, "tWW_UG_B1-B1pair-C1'pair":1, "tWW_UG_C4'-C1'-B1-B1pair":2, "tWW_UG_B1-B1pair-C1'pair-C4'pair":2, "tWW_UG_alpha_1":3, "tWW_UG_alpha_2":3, "tWW_UG_dB1":3, "tWW_UG_dB2":4,
"cWH_UG_tips_distance":2, "cWH_UG_C1'-B1-B1pair":1, "cWH_UG_B1-B1pair-C1'pair":2, "cWH_UG_C4'-C1'-B1-B1pair":2, "cWH_UG_B1-B1pair-C1'pair-C4'pair":2, "cWH_UG_alpha_1":2, "cWH_UG_alpha_2":2, "cWH_UG_dB1":2, "cWH_UG_dB2":2,
"tWH_UG_tips_distance":1, "tWH_UG_C1'-B1-B1pair":2, "tWH_UG_B1-B1pair-C1'pair":2, "tWH_UG_C4'-C1'-B1-B1pair":2, "tWH_UG_B1-B1pair-C1'pair-C4'pair":2, "tWH_UG_alpha_1":2, "tWH_UG_alpha_2":2, "tWH_UG_dB1":6, "tWH_UG_dB2":2,
"cHW_UG_tips_distance":2, "cHW_UG_C1'-B1-B1pair":2, "cHW_UG_B1-B1pair-C1'pair":2, "cHW_UG_C4'-C1'-B1-B1pair":1, "cHW_UG_B1-B1pair-C1'pair-C4'pair":2, "cHW_UG_alpha_1":1, "cHW_UG_alpha_2":2, "cHW_UG_dB1":4, "cHW_UG_dB2":4,
"tHW_UG_tips_distance":1, "tHW_UG_C1'-B1-B1pair":2, "tHW_UG_B1-B1pair-C1'pair":1, "tHW_UG_C4'-C1'-B1-B1pair":2, "tHW_UG_B1-B1pair-C1'pair-C4'pair":2, "tHW_UG_alpha_1":3, "tHW_UG_alpha_2":2, "tHW_UG_dB1":6, "tHW_UG_dB2":3,
"cWS_UG_tips_distance":2, "cWS_UG_C1'-B1-B1pair":4, "cWS_UG_B1-B1pair-C1'pair":2, "cWS_UG_C4'-C1'-B1-B1pair":3, "cWS_UG_B1-B1pair-C1'pair-C4'pair":2, "cWS_UG_alpha_1":2, "cWS_UG_alpha_2":2, "cWS_UG_dB1":2, "cWS_UG_dB2":2,
"tWS_UG_tips_distance":5, "tWS_UG_C1'-B1-B1pair":2, "tWS_UG_B1-B1pair-C1'pair":2, "tWS_UG_C4'-C1'-B1-B1pair":2, "tWS_UG_B1-B1pair-C1'pair-C4'pair":2, "tWS_UG_alpha_1":2, "tWS_UG_alpha_2":1, "tWS_UG_dB1":3, "tWS_UG_dB2":5,
"cSW_UG_tips_distance":2, "cSW_UG_C1'-B1-B1pair":2, "cSW_UG_B1-B1pair-C1'pair":3, "cSW_UG_C4'-C1'-B1-B1pair":2, "cSW_UG_B1-B1pair-C1'pair-C4'pair":1, "cSW_UG_alpha_1":2, "cSW_UG_alpha_2":2, "cSW_UG_dB1":3, "cSW_UG_dB2":2,
"tSW_UG_tips_distance":4, "tSW_UG_C1'-B1-B1pair":1, "tSW_UG_B1-B1pair-C1'pair":1, "tSW_UG_C4'-C1'-B1-B1pair":2, "tSW_UG_B1-B1pair-C1'pair-C4'pair":3, "tSW_UG_alpha_1":2, "tSW_UG_alpha_2":2, "tSW_UG_dB1":2, "tSW_UG_dB2":2,
"cHH_UG_tips_distance":5, "cHH_UG_C1'-B1-B1pair":3, "cHH_UG_B1-B1pair-C1'pair":2, "cHH_UG_C4'-C1'-B1-B1pair":2, "cHH_UG_B1-B1pair-C1'pair-C4'pair":2, "cHH_UG_alpha_1":2, "cHH_UG_alpha_2":3, "cHH_UG_dB1":4, "cHH_UG_dB2":5,
"tHH_UG_tips_distance":5, "tHH_UG_C1'-B1-B1pair":2, "tHH_UG_B1-B1pair-C1'pair":2, "tHH_UG_C4'-C1'-B1-B1pair":2, "tHH_UG_B1-B1pair-C1'pair-C4'pair":3, "tHH_UG_alpha_1":3, "tHH_UG_alpha_2":2, "tHH_UG_dB1":3, "tHH_UG_dB2":2,
"cSH_UG_tips_distance":5, "cSH_UG_C1'-B1-B1pair":1, "cSH_UG_B1-B1pair-C1'pair":2, "cSH_UG_C4'-C1'-B1-B1pair":2, "cSH_UG_B1-B1pair-C1'pair-C4'pair":2, "cSH_UG_alpha_1":2, "cSH_UG_alpha_2":2, "cSH_UG_dB1":3, "cSH_UG_dB2":4,
"tSH_UG_tips_distance":5, "tSH_UG_C1'-B1-B1pair":2, "tSH_UG_B1-B1pair-C1'pair":1, "tSH_UG_C4'-C1'-B1-B1pair":2, "tSH_UG_B1-B1pair-C1'pair-C4'pair":1, "tSH_UG_alpha_1":3, "tSH_UG_alpha_2":1, "tSH_UG_dB1":2, "tSH_UG_dB2":2,
"cHS_UG_tips_distance":3, "cHS_UG_C1'-B1-B1pair":2, "cHS_UG_B1-B1pair-C1'pair":3, "cHS_UG_C4'-C1'-B1-B1pair":2, "cHS_UG_B1-B1pair-C1'pair-C4'pair":4, "cHS_UG_alpha_1":2, "cHS_UG_alpha_2":3, "cHS_UG_dB1":3, "cHS_UG_dB2":4,
"tHS_UG_tips_distance":7, "tHS_UG_C1'-B1-B1pair":1, "tHS_UG_B1-B1pair-C1'pair":3, "tHS_UG_C4'-C1'-B1-B1pair":2, "tHS_UG_B1-B1pair-C1'pair-C4'pair":1, "tHS_UG_alpha_1":2, "tHS_UG_alpha_2":3, "tHS_UG_dB1":2, "tHS_UG_dB2":1,
"cSS_UG_tips_distance":2, "cSS_UG_C1'-B1-B1pair":2, "cSS_UG_B1-B1pair-C1'pair":2, "cSS_UG_C4'-C1'-B1-B1pair":2, "cSS_UG_B1-B1pair-C1'pair-C4'pair":2, "cSS_UG_alpha_1":1, "cSS_UG_alpha_2":2, "cSS_UG_dB1":2, "cSS_UG_dB2":3,
"tSS_UG_tips_distance":5, "tSS_UG_C1'-B1-B1pair":2, "tSS_UG_B1-B1pair-C1'pair":2, "tSS_UG_C4'-C1'-B1-B1pair":1, "tSS_UG_B1-B1pair-C1'pair-C4'pair":2, "tSS_UG_alpha_1":2, "tSS_UG_alpha_2":2, "tSS_UG_dB1":3, "tSS_UG_dB2":4,
"cWW_UU_tips_distance":1, "cWW_UU_C1'-B1-B1pair":2, "cWW_UU_B1-B1pair-C1'pair":3, "cWW_UU_C4'-C1'-B1-B1pair":3, "cWW_UU_B1-B1pair-C1'pair-C4'pair":2, "cWW_UU_alpha_1":2, "cWW_UU_alpha_2":2, "cWW_UU_dB1":2, "cWW_UU_dB2":1,
"tWW_UU_tips_distance":3, "tWW_UU_C1'-B1-B1pair":2, "tWW_UU_B1-B1pair-C1'pair":2, "tWW_UU_C4'-C1'-B1-B1pair":2, "tWW_UU_B1-B1pair-C1'pair-C4'pair":2, "tWW_UU_alpha_1":2, "tWW_UU_alpha_2":2, "tWW_UU_dB1":4, "tWW_UU_dB2":5,
"cWH_UU_tips_distance":2, "cWH_UU_C1'-B1-B1pair":2, "cWH_UU_B1-B1pair-C1'pair":2, "cWH_UU_C4'-C1'-B1-B1pair":3, "cWH_UU_B1-B1pair-C1'pair-C4'pair":3, "cWH_UU_alpha_1":2, "cWH_UU_alpha_2":3, "cWH_UU_dB1":3, "cWH_UU_dB2":5,
"tWH_UU_tips_distance":3, "tWH_UU_C1'-B1-B1pair":2, "tWH_UU_B1-B1pair-C1'pair":2, "tWH_UU_C4'-C1'-B1-B1pair":2, "tWH_UU_B1-B1pair-C1'pair-C4'pair":2, "tWH_UU_alpha_1":3, "tWH_UU_alpha_2":3, "tWH_UU_dB1":2, "tWH_UU_dB2":2,
"cHW_UU_tips_distance":1, "cHW_UU_C1'-B1-B1pair":2, "cHW_UU_B1-B1pair-C1'pair":3, "cHW_UU_C4'-C1'-B1-B1pair":1, "cHW_UU_B1-B1pair-C1'pair-C4'pair":3, "cHW_UU_alpha_1":2, "cHW_UU_alpha_2":2, "cHW_UU_dB1":3, "cHW_UU_dB2":4,
"tHW_UU_tips_distance":3, "tHW_UU_C1'-B1-B1pair":3, "tHW_UU_B1-B1pair-C1'pair":2, "tHW_UU_C4'-C1'-B1-B1pair":2, "tHW_UU_B1-B1pair-C1'pair-C4'pair":2, "tHW_UU_alpha_1":2, "tHW_UU_alpha_2":3, "tHW_UU_dB1":2, "tHW_UU_dB2":2,
"cWS_UU_tips_distance":5, "cWS_UU_C1'-B1-B1pair":1, "cWS_UU_B1-B1pair-C1'pair":1, "cWS_UU_C4'-C1'-B1-B1pair":2, "cWS_UU_B1-B1pair-C1'pair-C4'pair":3, "cWS_UU_alpha_1":2, "cWS_UU_alpha_2":1, "cWS_UU_dB1":2, "cWS_UU_dB2":1,
"tWS_UU_tips_distance":3, "tWS_UU_C1'-B1-B1pair":2, "tWS_UU_B1-B1pair-C1'pair":2, "tWS_UU_C4'-C1'-B1-B1pair":3, "tWS_UU_B1-B1pair-C1'pair-C4'pair":2, "tWS_UU_alpha_1":2, "tWS_UU_alpha_2":2, "tWS_UU_dB1":3, "tWS_UU_dB2":3,
"cSW_UU_tips_distance":5, "cSW_UU_C1'-B1-B1pair":1, "cSW_UU_B1-B1pair-C1'pair":3, "cSW_UU_C4'-C1'-B1-B1pair":2, "cSW_UU_B1-B1pair-C1'pair-C4'pair":3, "cSW_UU_alpha_1":2, "cSW_UU_alpha_2":3, "cSW_UU_dB1":1, "cSW_UU_dB2":4,
"tSW_UU_tips_distance":6, "tSW_UU_C1'-B1-B1pair":3, "tSW_UU_B1-B1pair-C1'pair":1, "tSW_UU_C4'-C1'-B1-B1pair":2, "tSW_UU_B1-B1pair-C1'pair-C4'pair":2, "tSW_UU_alpha_1":1, "tSW_UU_alpha_2":2, "tSW_UU_dB1":3, "tSW_UU_dB2":3,
"cHH_UU_tips_distance":5, "cHH_UU_C1'-B1-B1pair":1, "cHH_UU_B1-B1pair-C1'pair":1, "cHH_UU_C4'-C1'-B1-B1pair":3, "cHH_UU_B1-B1pair-C1'pair-C4'pair":2, "cHH_UU_alpha_1":2, "cHH_UU_alpha_2":2, "cHH_UU_dB1":1, "cHH_UU_dB2":5,
"tHH_UU_tips_distance":5, "tHH_UU_C1'-B1-B1pair":2, "tHH_UU_B1-B1pair-C1'pair":3, "tHH_UU_C4'-C1'-B1-B1pair":1, "tHH_UU_B1-B1pair-C1'pair-C4'pair":3, "tHH_UU_alpha_1":2, "tHH_UU_alpha_2":4, "tHH_UU_dB1":4, "tHH_UU_dB2":5,
"cSH_UU_tips_distance":5, "cSH_UU_C1'-B1-B1pair":1, "cSH_UU_B1-B1pair-C1'pair":3, "cSH_UU_C4'-C1'-B1-B1pair":2, "cSH_UU_B1-B1pair-C1'pair-C4'pair":2, "cSH_UU_alpha_1":3, "cSH_UU_alpha_2":2, "cSH_UU_dB1":2, "cSH_UU_dB2":5,
"tSH_UU_tips_distance":5, "tSH_UU_C1'-B1-B1pair":2, "tSH_UU_B1-B1pair-C1'pair":1, "tSH_UU_C4'-C1'-B1-B1pair":3, "tSH_UU_B1-B1pair-C1'pair-C4'pair":3, "tSH_UU_alpha_1":1, "tSH_UU_alpha_2":1, "tSH_UU_dB1":1, "tSH_UU_dB2":5,
"cHS_UU_tips_distance":7, "cHS_UU_C1'-B1-B1pair":2, "cHS_UU_B1-B1pair-C1'pair":2, "cHS_UU_C4'-C1'-B1-B1pair":2, "cHS_UU_B1-B1pair-C1'pair-C4'pair":2, "cHS_UU_alpha_1":2, "cHS_UU_alpha_2":2, "cHS_UU_dB1":3, "cHS_UU_dB2":2,
"tHS_UU_tips_distance":5, "tHS_UU_C1'-B1-B1pair":1, "tHS_UU_B1-B1pair-C1'pair":2, "tHS_UU_C4'-C1'-B1-B1pair":2, "tHS_UU_B1-B1pair-C1'pair-C4'pair":1, "tHS_UU_alpha_1":1, "tHS_UU_alpha_2":2, "tHS_UU_dB1":4, "tHS_UU_dB2":1,
"cSS_UU_tips_distance":5, "cSS_UU_C1'-B1-B1pair":2, "cSS_UU_B1-B1pair-C1'pair":2, "cSS_UU_C4'-C1'-B1-B1pair":2, "cSS_UU_B1-B1pair-C1'pair-C4'pair":3, "cSS_UU_alpha_1":2, "cSS_UU_alpha_2":2, "cSS_UU_dB1":6, "cSS_UU_dB2":4,
"tSS_UU_tips_distance":8, "tSS_UU_C1'-B1-B1pair":1, "tSS_UU_B1-B1pair-C1'pair":1, "tSS_UU_C4'-C1'-B1-B1pair":2, "tSS_UU_B1-B1pair-C1'pair-C4'pair":1, "tSS_UU_alpha_1":1, "tSS_UU_alpha_2":2, "tSS_UU_dB1":3, "tSS_UU_dB2":4,
}
@trace_unhandled_exceptions
def retrieve_angles(db, res):
"""
Retrieve torsion angles from RNANet.db and convert them to degrees
"""
# Retrieve angle values
with sqlite3.connect(runDir + "/results/RNANet.db") as conn:
conn.execute('pragma journal_mode=wal')
df = pd.read_sql(f"""SELECT chain_id, nt_name, alpha, beta, gamma, delta, epsilon, zeta, chi
FROM (
SELECT chain_id FROM chain JOIN structure ON chain.structure_id = structure.pdb_id
WHERE chain.rfam_acc = 'unmappd' AND structure.resolution <= {res} AND issue = 0
) AS c NATURAL JOIN nucleotide
WHERE nt_name='A' OR nt_name='C' OR nt_name='G' OR nt_name='U';""", conn)
# convert to degrees: map radians in [0, 2π) to degrees in (-180, 180]
j = (180.0/np.pi)
torsions = df.iloc[:, 0:2].merge(
df.iloc[:, 2:9].applymap(lambda x: j*x if x <= np.pi else j*x-360.0, na_action='ignore'),
left_index=True, right_index=True
)
return torsions
def retrieve_eta_theta(db, res):
"""
Retrieve pseudotorsions from RNANet.db and convert them to degrees
"""
# Retrieve angle values
with sqlite3.connect(runDir + "/results/RNANet.db") as conn:
conn.execute('pragma journal_mode=wal')
df = pd.read_sql(f"""SELECT chain_id, nt_name, eta, theta, eta_prime, theta_prime, eta_base, theta_base
FROM (
SELECT chain_id FROM chain JOIN structure ON chain.structure_id = structure.pdb_id
WHERE chain.rfam_acc = 'unmappd' AND structure.resolution <= {res} AND issue = 0
) AS c NATURAL JOIN nucleotide
WHERE nt_name='A' OR nt_name='C' OR nt_name='G' OR nt_name='U';""", conn)
# convert to degrees: map radians in [0, 2π) to degrees in (-180, 180]
j = (180.0/np.pi)
pseudotorsions = df.iloc[:, 0:2].merge(
df.iloc[:, 2:8].applymap(lambda x: j*x if x <= np.pi else j*x-360.0, na_action='ignore'),
left_index=True, right_index=True
)
return pseudotorsions
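# Note on the two retrieval helpers above: `res` is the resolution cutoff used in the SQL filter,
# and the `db` argument is not used directly (the database path is rebuilt from runDir).
# A hypothetical call site could look like:
# torsions = retrieve_angles(runDir + "/results/RNANet.db", 4.0)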
def get_euclidian_distance(L1, L2):
"""
Returns the distance between two points (coordinates in lists)
"""
if len(L1)*len(L2) == 0:
return np.nan
if len(L1) == 1:
L1 = L1[0]
if len(L2) == 1:
L2 = L2[0]
e = 0
for i in range(len(L1)):
try:
e += float(L1[i] - L2[i])**2
except TypeError:
print("Terms: ", L1, L2)
except IndexError:
print("Terms: ", L1, L2)
return np.sqrt(e)
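# Worked example (hypothetical coordinates, not taken from any structure): the helpers in this
# file pass atom coordinates as lists of triples, so a call looks like
# get_euclidian_distance([[0, 0, 0]], [[3, 4, 0]]) -> 5.0
# and an empty list on either side yields NaN.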
def get_flat_angle(L1, L2, L3):
"""
Returns the flat angle (in degrees) defined by 3 points.
L1, L2, L3 : lists of (x,y,z) coordinates
Returns NaN if one of the lists is empty.
"""
if len(L1)*len(L2)*len(L3) == 0:
return np.nan
return calc_angle(Vector(L1[0]), Vector(L2[0]), Vector(L3[0]))*(180/np.pi)
def get_torsion_angle(L1, L2, L3, L4):
"""
Returns the torsion (dihedral) angle in degrees defined by 4 points.
L1, L2, L3, L4 : lists of (x,y,z) coordinates
Returns NaN if one of the lists is empty.
"""
if len(L1)*len(L2)*len(L3)*len(L4) == 0:
return np.nan
return calc_dihedral(Vector(L1[0]), Vector(L2[0]), Vector(L3[0]), Vector(L4[0]))*(180/np.pi)
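# Hedged usage sketch (made-up points): both angle helpers return degrees and NaN on missing atoms.
# get_flat_angle([[0, 0, 0]], [[1, 0, 0]], [[1, 1, 0]]) -> 90.0
# get_torsion_angle returns a signed dihedral; the sign follows Biopython's calc_dihedral convention.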
def pos_b1(res):
"""
Returns the coordinates of virtual atom B1 (center of the first aromatic cycle)
"""
coordb1=[]
somme_x_b1=0
somme_y_b1=0
somme_z_b1=0
moy_x_b1=0
moy_y_b1=0
moy_z_b1=0
#different cases
#some residues have 2 aromatic cycles
if res.get_resname() in ['A', 'G', '2MG', '7MG', 'MA6', '6IA', 'OMG' , '2MA', 'B9B', 'A2M', '1MA', 'E7G', 'P7G', 'B8W', 'B8K', 'BGH', '6MZ', 'E6G', 'MHG', 'M7A', 'M2G', 'P5P', 'G7M', '1MG', 'T6A', 'MIA', 'YG', 'YYG', 'I', 'DG', 'N79', '574', 'DJF', 'AET', '12A', 'ANZ', 'UY4'] :
c=0
names=[]
for atom in res :
if (atom.get_fullname() in ['N9', 'C8', 'N7', 'C4', 'C5']) :
c=c+1
names.append(atom.get_name())
coord=atom.get_vector()
somme_x_b1=somme_x_b1+coord[0]
somme_y_b1=somme_y_b1+coord[1]
somme_z_b1=somme_z_b1+coord[2]
else :
c=c
# compute B1 coordinates
if c != 0 :
moy_x_b1=somme_x_b1/c
moy_y_b1=somme_y_b1/c
moy_z_b1=somme_z_b1/c
coordb1.append(moy_x_b1)
coordb1.append(moy_y_b1)
coordb1.append(moy_z_b1)
#others have only one cycle
if res.get_resname() in ['C', 'U', 'AG9', '70U', '1RN', 'RSP', '3AU', 'CM0', 'U8U', 'IU', 'E3C', '4SU', '5HM', 'LV2', 'LHH', '4AC', 'CH', 'Y5P', '2MU', '4OC', 'B8T', 'JMH', 'JMC', 'DC', 'B9H', 'UR3', 'I4U', 'B8Q', 'P4U', 'OMU', 'OMC', '5MU', 'H2U', 'CBV', 'M1Y', 'B8N', '3TD', 'B8H'] :
c=0
for atom in res :
if (atom.get_fullname() in ['C6', 'N3', 'N1', 'C2', 'C4', 'C5']):
c=c+1
coord=atom.get_vector()
somme_x_b1=somme_x_b1+coord[0]
somme_y_b1=somme_y_b1+coord[1]
somme_z_b1=somme_z_b1+coord[2]
# compute B1 coordinates
if c != 0 :
moy_x_b1=somme_x_b1/c
moy_y_b1=somme_y_b1/c
moy_z_b1=somme_z_b1/c
coordb1.append(moy_x_b1)
coordb1.append(moy_y_b1)
coordb1.append(moy_z_b1)
if len(coordb1):
return [coordb1]
else:
return []
def pos_b2(res):
"""
Returns the coordinates of virtual atom B2 (center of the second aromatic cycle, if exists)
"""
coordb2=[]
somme_x_b2=0
somme_y_b2=0
somme_z_b2=0
moy_x_b2=0
moy_y_b2=0
moy_z_b2=0
if res.get_resname() in ['A', 'G', '2MG', '7MG', 'MA6', '6IA', 'OMG' , '2MA', 'B9B', 'A2M', '1MA', 'E7G', 'P7G', 'B8W', 'B8K', 'BGH', '6MZ', 'E6G', 'MHG', 'M7A', 'M2G', 'P5P', 'G7M', '1MG', 'T6A', 'MIA', 'YG', 'YYG', 'I', 'DG', 'N79', '574', 'DJF', 'AET', '12A', 'ANZ', 'UY4'] : # 2 aromatic cycles
c=0
for atom in res :
if atom.get_fullname() in ['C6', 'N3', 'N1', 'C2', 'C4', 'C5'] :
c=c+1
coord=atom.get_vector()
somme_x_b2=somme_x_b2+coord[0]
somme_y_b2=somme_y_b2+coord[1]
somme_z_b2=somme_z_b2+coord[2]
# compute B2 coordinates
if c!=0 :
moy_x_b2=somme_x_b2/c
moy_y_b2=somme_y_b2/c
moy_z_b2=somme_z_b2/c
coordb2.append(moy_x_b2)
coordb2.append(moy_y_b2)
coordb2.append(moy_z_b2)
if len(coordb2):
return [coordb2]
else:
return []
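# In this coarse-grained representation, B1 is the centroid of the five-membered ring atoms
# (N9, C8, N7, C4, C5) for purines, or of the six-membered ring for pyrimidines, and B2 is the
# centroid of the purine six-membered ring; pyrimidines therefore have no B2 and pos_b2()
# returns an empty list for them.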
@trace_unhandled_exceptions
def measures_aa(name, s, thr_idx):
"""
Measures the distance between atoms linked by covalent bonds
"""
# do not recompute something already computed
if os.path.isfile(runDir + "/results/geometry/all-atoms/distances/dist_atoms_" + name + ".csv"):
return
last_o3p = [] # O3' of the previous nucleotide, linked to the P of the current nucleotide
l_common = []
l_purines = []
l_pyrimidines = []
setproctitle(f"RNANet statistics.py Worker {thr_idx+1} measure_aa_dists({name})")
chain = next(s[0].get_chains()) # 1 chain per file
residues = list(chain.get_residues())
pbar = tqdm(total=len(residues), position=thr_idx+1, desc=f"Worker {thr_idx+1}: {name} measure_aa_dists", unit="res", leave=False)
pbar.update(0)
for res in chain :
# for residues A, G, C, U
op3_p = []
p_op1 = []
p_op2 = []
p_o5p = []
o5p_c5p = []
c5p_c4p = []
c4p_o4p = []
o4p_c1p = []
c1p_c2p = []
c2p_o2p = []
c2p_c3p = []
c3p_o3p = []
c4p_c3p = []
# if res = A or G
c1p_n9 = None
n9_c8 = None
c8_n7 = None
n7_c5 = None
c5_c6 = None
c6_n1 = None
n1_c2 = None
c2_n3 = None
n3_c4 = None
c4_n9 = None
c4_c5 = None
# if res = G
c6_o6 = None
c2_n2 = None
# if res = A
c6_n6 = None
# if res = C or U
c1p_n1 = None
n1_c6 = None
c6_c5 = None
c5_c4 = None
c4_n3 = None
n3_c2 = None
c2_n1 = None
c2_o2 = None
# if res = C
c4_n4 = None
# if res = U
c4_o4 = None
last_o3p_p = None
if res.get_resname()=='A' or res.get_resname()=='G' or res.get_resname()=='C' or res.get_resname()=='U' :
# get the coordinates of the atoms
atom_p = [ atom.get_coord() for atom in res if atom.get_name() == "P"]
atom_op3 = [ atom.get_coord() for atom in res if "OP3" in atom.get_fullname() ] # OP3 belongs to the previous nucleotide!
atom_op1 = [ atom.get_coord() for atom in res if "OP1" in atom.get_fullname() ]
atom_op2 = [ atom.get_coord() for atom in res if "OP2" in atom.get_fullname() ]
atom_o5p= [ atom.get_coord() for atom in res if "O5'" in atom.get_fullname() ]
atom_c5p = [ atom.get_coord() for atom in res if "C5'" in atom.get_fullname() ]
atom_c4p = [ atom.get_coord() for atom in res if "C4'" in atom.get_fullname() ]
atom_o4p = [ atom.get_coord() for atom in res if "O4'" in atom.get_fullname() ]
atom_c3p = [ atom.get_coord() for atom in res if "C3'" in atom.get_fullname() ]
atom_o3p = [ atom.get_coord() for atom in res if "O3'" in atom.get_fullname() ]
atom_c2p = [ atom.get_coord() for atom in res if "C2'" in atom.get_fullname() ]
atom_o2p = [ atom.get_coord() for atom in res if "O2'" in atom.get_fullname() ]
atom_c1p = [ atom.get_coord() for atom in res if "C1'" in atom.get_fullname() ]
atom_n9 = [ atom.get_coord() for atom in res if "N9" in atom.get_fullname() ]
atom_c8 = [ atom.get_coord() for atom in res if "C8" in atom.get_fullname() ]
atom_n7 = [ atom.get_coord() for atom in res if "N7" in atom.get_fullname() ]
atom_c5 = [ atom.get_coord() for atom in res if atom.get_name() == "C5"]
atom_c6 = [ atom.get_coord() for atom in res if "C6" in atom.get_fullname() ]
atom_o6 = [ atom.get_coord() for atom in res if "O6" in atom.get_fullname() ]
atom_n6 = [ atom.get_coord() for atom in res if "N6" in atom.get_fullname() ]
atom_n1 = [ atom.get_coord() for atom in res if "N1" in atom.get_fullname() ]
atom_c2 = [ atom.get_coord() for atom in res if atom.get_name() == "C2"]
atom_n2 = [ atom.get_coord() for atom in res if "N2" in atom.get_fullname() ]
atom_o2 = [ atom.get_coord() for atom in res if atom.get_name() == "O2"]
atom_n3 = [ atom.get_coord() for atom in res if "N3" in atom.get_fullname() ]
atom_c4 = [ atom.get_coord() for atom in res if atom.get_name() == "C4" ]
atom_n4 = [ atom.get_coord() for atom in res if "N4" in atom.get_fullname() ]
atom_o4 = [ atom.get_coord() for atom in res if atom.get_name() == "O4"]
if len(atom_op3):
last_o3p_p = get_euclidian_distance(atom_op3, atom_p) # This nucleotide has an OP3 atom (likely the beginning of a chain)
else:
last_o3p_p = get_euclidian_distance(last_o3p, atom_p) # link with the previous nucleotide
p_op1 = get_euclidian_distance(atom_op1, atom_p)
p_op2 = get_euclidian_distance(atom_op2, atom_p)
p_o5p = get_euclidian_distance(atom_o5p, atom_p)
o5p_c5p = get_euclidian_distance(atom_o5p, atom_c5p)
c5p_c4p = get_euclidian_distance(atom_c5p, atom_c4p)
c4p_o4p = get_euclidian_distance(atom_c4p, atom_o4p)
c4p_c3p = get_euclidian_distance(atom_c4p, atom_c3p)
o4p_c1p = get_euclidian_distance(atom_o4p, atom_c1p)
c1p_c2p = get_euclidian_distance(atom_c1p, atom_c2p)
c2p_o2p = get_euclidian_distance(atom_c2p, atom_o2p)
c2p_c3p = get_euclidian_distance(atom_c2p, atom_c3p)
c3p_o3p = get_euclidian_distance(atom_c3p, atom_o3p)
last_o3p = atom_o3p # o3' of this residue becomes the previous o3' of the following
# different cases for the aromatic cycles
if res.get_resname()=='A' or res.get_resname()=='G':
# compute the distances between atoms of aromatic cycles
c1p_n9 = get_euclidian_distance(atom_c1p, atom_n9)
n9_c8 = get_euclidian_distance(atom_n9, atom_c8)
c8_n7 = get_euclidian_distance(atom_c8, atom_n7)
n7_c5 = get_euclidian_distance(atom_n7, atom_c5)
c5_c6 = get_euclidian_distance(atom_c5, atom_c6)
c6_o6 = get_euclidian_distance(atom_c6, atom_o6)
c6_n6 = get_euclidian_distance(atom_c6, atom_n6)
c6_n1 = get_euclidian_distance(atom_c6, atom_n1)
n1_c2 = get_euclidian_distance(atom_n1, atom_c2)
c2_n2 = get_euclidian_distance(atom_c2, atom_n2)
c2_n3 = get_euclidian_distance(atom_c2, atom_n3)
n3_c4 = get_euclidian_distance(atom_n3, atom_c4)
c4_n9 = get_euclidian_distance(atom_c4, atom_n9)
c4_c5 = get_euclidian_distance(atom_c4, atom_c5)
if res.get_resname()=='C' or res.get_resname()=='U' :
c1p_n1 = get_euclidian_distance(atom_c1p, atom_n1)
n1_c6 = get_euclidian_distance(atom_n1, atom_c6)
c6_c5 = get_euclidian_distance(atom_c6, atom_c5)
c5_c4 = get_euclidian_distance(atom_c5, atom_c4)
c4_n3 = get_euclidian_distance(atom_c4, atom_n3)
n3_c2 = get_euclidian_distance(atom_n3, atom_c2)
c2_o2 = get_euclidian_distance(atom_c2, atom_o2)
c2_n1 = get_euclidian_distance(atom_c2, atom_n1)
c4_n4 = get_euclidian_distance(atom_c4, atom_n4)
c4_o4 = get_euclidian_distance(atom_c4, atom_o4)
l_common.append([res.get_resname(), last_o3p_p, p_op1, p_op2, p_o5p, o5p_c5p, c5p_c4p, c4p_o4p, c4p_c3p, o4p_c1p, c1p_c2p, c2p_o2p, c2p_c3p, c3p_o3p] )
l_purines.append([c1p_n9, n9_c8, c8_n7, n7_c5, c5_c6, c6_o6, c6_n6, c6_n1, n1_c2, c2_n2, c2_n3, n3_c4, c4_n9, c4_c5])
l_pyrimidines.append([c1p_n1, n1_c6, c6_c5, c5_c4, c4_n3, n3_c2, c2_o2, c2_n1, c4_n4, c4_o4])
pbar.update(1)
df_comm = pd.DataFrame(l_common, columns=["Residue", "O3'-P", "P-OP1", "P-OP2", "P-O5'", "O5'-C5'", "C5'-C4'", "C4'-O4'", "C4'-C3'", "O4'-C1'", "C1'-C2'", "C2'-O2'", "C2'-C3'", "C3'-O3'"])
df_pur = pd.DataFrame(l_purines, columns=["C1'-N9", "N9-C8", "C8-N7", "N7-C5", "C5-C6", "C6-O6", "C6-N6", "C6-N1", "N1-C2", "C2-N2", "C2-N3", "N3-C4", "C4-N9", "C4-C5" ])
df_pyr = pd.DataFrame(l_pyrimidines, columns=["C1'-N1", "N1-C6", "C6-C5", "C5-C4", "C4-N3", "N3-C2", "C2-O2", "C2-N1", "C4-N4", "C4-O4"])
df = pd.concat([df_comm, df_pur, df_pyr], axis = 1)
pbar.close()
df.to_csv(runDir + "/results/geometry/all-atoms/distances/dist_atoms_" + name + ".csv")
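# Note: in the CSV written above, purine-specific distance columns are empty (NaN) on
# pyrimidine rows and vice versa, since only one of the two blocks above is filled per residue.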
@trace_unhandled_exceptions
def measures_pyle(name, s, thr_idx):
"""
Measures the distances and plane angles involving C1' and P atoms
Saves the results in a dataframe
"""
# do not recompute something already computed
if (os.path.isfile(runDir + '/results/geometry/Pyle/angles/flat_angles_pyle_' + name + '.csv') and
os.path.isfile(runDir + "/results/geometry/Pyle/distances/distances_pyle_" + name + ".csv")):
return
l_dist = []
l_angl = []
last_p = []
last_c1p = []
last_c4p = []
setproctitle(f"RNANet statistics.py Worker {thr_idx+1} measures_pyle({name})")
chain = next(s[0].get_chains())
for res in tqdm(chain, position=thr_idx+1, desc=f"Worker {thr_idx+1}: {name} measures_pyle", unit="res", leave=False):
p_c1p_psuiv = np.nan
c1p_psuiv_c1psuiv = np.nan
if res.get_resname() not in ['ATP', 'CCC', 'A3P', 'A23', 'GDP', 'RIA', "2BA"] :
atom_p = [ atom.get_coord() for atom in res if atom.get_name() == "P"]
atom_c1p = [ atom.get_coord() for atom in res if "C1'" in atom.get_fullname() ]
atom_c4p = [ atom.get_coord() for atom in res if "C4'" in atom.get_fullname() ]
if len(atom_c1p) > 1:
for atom in res:
if "C1'" in atom.get_fullname():
print("\n", atom.get_fullname(), "-", res.get_resname(), "\n")
p_c1p_psuiv = get_flat_angle(last_p, last_c1p, atom_p)
c1p_psuiv_c1psuiv = get_flat_angle(last_c1p, atom_p, atom_c1p)
c1p_psuiv = get_euclidian_distance(last_c1p, atom_p)
p_c1p = get_euclidian_distance(atom_p, atom_c1p)
c4p_psuiv = get_euclidian_distance(last_c4p, atom_p)
p_c4p = get_euclidian_distance(atom_p, atom_c4p)
last_p = atom_p
last_c1p = atom_c1p
last_c4p = atom_c4p
l_dist.append([res.get_resname(), c1p_psuiv, p_c1p, c4p_psuiv, p_c4p])
l_angl.append([res.get_resname(), p_c1p_psuiv, c1p_psuiv_c1psuiv])
df = pd.DataFrame(l_dist, columns=["Residue", "C1'-P", "P-C1'", "C4'-P", "P-C4'"])
df.to_csv(runDir + "/results/geometry/Pyle/distances/distances_pyle_" + name + ".csv")
df = pd.DataFrame(l_angl, columns=["Residue", "P-C1'-P°", "C1'-P°-C1'°"])
df.to_csv(runDir + "/results/geometry/Pyle/angles/flat_angles_pyle_"+name+".csv")
@trace_unhandled_exceptions
def measures_hrna(name, s, thr_idx):
"""
Measures the distance/angles between the atoms of the HiRE-RNA model linked by covalent bonds
"""
# do not recompute something already computed
if (os.path.isfile(runDir + '/results/geometry/HiRE-RNA/distances/distances_HiRERNA '+name+'.csv') and
os.path.isfile(runDir + '/results/geometry/HiRE-RNA/angles/angles_HiRERNA '+name+'.csv') and
os.path.isfile(runDir + '/results/geometry/HiRE-RNA/torsions/torsions_HiRERNA '+name+'.csv')):
return
l_dist = []
l_angl = []
l_tors = []
last_c4p = []
last_c5p = []
last_c1p = []
last_o5p = []
setproctitle(f"RNANet statistics.py Worker {thr_idx+1} measures_hrna({name})")
chain = next(s[0].get_chains())
residues=list(chain.get_residues())
for res in tqdm(chain, position=thr_idx+1, desc=f"Worker {thr_idx+1}: {name} measures_hrna", unit="res", leave=False):
# distances
p_o5p = None
o5p_c5p = None
c5p_c4p = None
c4p_c1p = None
c1p_b1 = None
b1_b2 = None
last_c4p_p = np.nan
# angles
p_o5p_c5p = None
o5p_c5p_c4p = None
c5p_c4p_c1p = None
c4p_c1p_b1 = None
c1p_b1_b2 = None
lastc4p_p_o5p = None
lastc5p_lastc4p_p = None
lastc1p_lastc4p_p = None
# torsions
p_o5_c5_c4 = np.nan
o5_c5_c4_c1 = np.nan
c5_c4_c1_b1 = np.nan
c4_c1_b1_b2 = np.nan
o5_c5_c4_psuiv = np.nan
c5_c4_psuiv_o5suiv = np.nan
c4_psuiv_o5suiv_c5suiv = np.nan
c1_c4_psuiv_o5suiv = np.nan
if res.get_resname() not in ['ATP', 'CCC', 'A3P', 'A23', 'GDP', 'RIA', "2BA"] : # several phosphate groups, ignore
atom_p = [ atom.get_coord() for atom in res if atom.get_name() == "P"]
atom_o5p = [ atom.get_coord() for atom in res if "O5'" in atom.get_fullname() ]
atom_c5p = [ atom.get_coord() for atom in res if "C5'" in atom.get_fullname() ]
atom_c4p = [ atom.get_coord() for atom in res if "C4'" in atom.get_fullname() ]
atom_c1p = [ atom.get_coord() for atom in res if "C1'" in atom.get_fullname() ]
atom_b1 = pos_b1(res) # position b1 to be calculated, depending on the case
atom_b2 = pos_b2(res) # position b2 to be calculated only for those with 2 cycles
# Distances. If one of the atoms is empty, the euclidian distance returns NaN.
last_c4p_p = get_euclidian_distance(last_c4p, atom_p)
p_o5p = get_euclidian_distance(atom_p, atom_o5p)
o5p_c5p = get_euclidian_distance(atom_o5p, atom_c5p)
c5p_c4p = get_euclidian_distance(atom_c5p, atom_c4p)
c4p_c1p = get_euclidian_distance(atom_c4p, atom_c1p)
c1p_b1 = get_euclidian_distance(atom_c1p, atom_b1)
b1_b2 = get_euclidian_distance(atom_b1, atom_b2)
# flat angles. Same.
lastc4p_p_o5p = get_flat_angle(last_c4p, atom_p, atom_o5p)
lastc1p_lastc4p_p = get_flat_angle(last_c1p, last_c4p, atom_p)
lastc5p_lastc4p_p = get_flat_angle(last_c5p, last_c4p, atom_p)
p_o5p_c5p = get_flat_angle(atom_p, atom_o5p, atom_c5p)
o5p_c5p_c4p = get_flat_angle(atom_o5p, atom_c5p, atom_c4p)
c5p_c4p_c1p = get_flat_angle(atom_c5p, atom_c4p, atom_c1p)
c4p_c1p_b1 = get_flat_angle(atom_c4p, atom_c1p, atom_b1)
c1p_b1_b2 = get_flat_angle(atom_c1p, atom_b1, atom_b2)
# torsions. Idem.
p_o5_c5_c4 = get_torsion_angle(atom_p, atom_o5p, atom_c5p, atom_c4p)
o5_c5_c4_c1 = get_torsion_angle(atom_o5p, atom_c5p, atom_c4p, atom_c1p)
c5_c4_c1_b1 = get_torsion_angle(atom_c5p, atom_c4p, atom_c1p, atom_b1)
c4_c1_b1_b2 = get_torsion_angle(atom_c4p, atom_c1p, atom_b1, atom_b2)
o5_c5_c4_psuiv = get_torsion_angle(last_o5p, last_c5p, last_c4p, atom_p)
c5_c4_psuiv_o5suiv = get_torsion_angle(last_c5p, last_c4p, atom_p, atom_o5p)
c4_psuiv_o5suiv_c5suiv = get_torsion_angle(last_c4p, atom_p, atom_o5p, atom_c5p)
c1_c4_psuiv_o5suiv = get_torsion_angle(last_c1p, last_c4p, atom_p, atom_o5p)
last_c4p = atom_c4p
last_c5p = atom_c5p
last_c1p = atom_c1p
last_o5p = atom_o5p
l_dist.append([res.get_resname(), last_c4p_p, p_o5p, o5p_c5p, c5p_c4p, c4p_c1p, c1p_b1, b1_b2])
l_angl.append([res.get_resname(), lastc4p_p_o5p, lastc1p_lastc4p_p, lastc5p_lastc4p_p, p_o5p_c5p, o5p_c5p_c4p, c5p_c4p_c1p, c4p_c1p_b1, c1p_b1_b2])
l_tors.append([res.get_resname(), p_o5_c5_c4, o5_c5_c4_c1, c5_c4_c1_b1, c4_c1_b1_b2, o5_c5_c4_psuiv, c5_c4_psuiv_o5suiv, c4_psuiv_o5suiv_c5suiv, c1_c4_psuiv_o5suiv])
df = pd.DataFrame(l_dist, columns=["Residue", "C4'-P", "P-O5'", "O5'-C5'", "C5'-C4'", "C4'-C1'", "C1'-B1", "B1-B2"])
df.to_csv(runDir + '/results/geometry/HiRE-RNA/distances/distances_HiRERNA '+name+'.csv')
df = pd.DataFrame(l_angl, columns=["Residue", "C4'-P-O5'", "C1'-C4'-P", "C5'-C4'-P", "P-O5'-C5'", "O5'-C5'-C4'", "C5'-C4'-C1'", "C4'-C1'-B1", "C1'-B1-B2"])
df.to_csv(runDir + '/results/geometry/HiRE-RNA/angles/angles_HiRERNA ' + name + ".csv")
df = pd.DataFrame(l_tors, columns=["Residue", "P-O5'-C5'-C4'", "O5'-C5'-C4'-C1'", "C5'-C4'-C1'-B1", "C4'-C1'-B1-B2", "O5'-C5'-C4'-P°", "C5'-C4'-P°-O5'°", "C4'-P°-O5'°-C5'°", "C1'-C4'-P°-O5'°"])
# API: pandas.DataFrame
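# The write below is assumed from the existence check at the top of measures_hrna(), which
# expects a torsions CSV alongside the distances and angles files; the original text was cut
# just before this statement.
df.to_csv(runDir + '/results/geometry/HiRE-RNA/torsions/torsions_HiRERNA '+name+'.csv')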
from __future__ import annotations
import csv
import logging
import os
import re
from typing import TYPE_CHECKING
from clevercsv.wrappers import read_dataframe
import pandas as pd
from boadata.core import DataObject
from boadata.core.data_conversion import ChainConversion, IdentityConversion
from .pandas_types import PandasDataFrameBase
if TYPE_CHECKING:
from typing import Optional
from boadata.data.text_types import TextFile
@DataObject.register_type()
@IdentityConversion.enable_to("pandas_data_frame")
@ChainConversion.enable_to("numpy_array", through="pandas_data_frame")
class CSVFile(PandasDataFrameBase):
type_name = "csv"
def __to_text__(self, **kwargs) -> TextFile:
constructor = DataObject.registered_types["text"]
return constructor.from_uri(self.uri, source=self, **kwargs)
@classmethod
def accepts_uri(cls, uri: str) -> bool:
return bool(re.search("\\.[tc]sv(\\.gz)?$", uri.lower()))
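# For illustration (hypothetical paths): the pattern accepts ".csv"/".tsv" files and their
# gzipped variants, e.g. accepts_uri("runs/table.tsv.gz") -> True, accepts_uri("notes.txt") -> False.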
@classmethod
def _fallback_read(cls, uri: str, **kwargs) -> pd.DataFrame:
with open(uri, "r") as fin:
lines = [line for line in csv.reader(fin)]
try:
return pd.DataFrame(lines[1:], columns=lines[0]).infer_objects(
# convert_numeric=True
)
except:
return pd.DataFrame(lines).infer_objects() # convert_numeric=True)
@classmethod
def from_uri(cls, uri: str, index_col=False, source: Optional[DataObject] = None, **kwargs) -> "CSVFile":
if not "sep" in kwargs and re.search("\\.tsv(\\.gz)?", uri.lower()):
kwargs["sep"] = "\\t"
def _clever_csv_read():
return read_dataframe(uri, **kwargs)
methods = {
"clevercsv": _clever_csv_read,
"pandas_c": lambda:
|
pd.read_csv(uri, index_col=index_col, **kwargs)
|
pandas.read_csv
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import Index, MultiIndex, Series, date_range, isna
import pandas._testing as tm
@pytest.fixture(
params=[
"linear",
"index",
"values",
"nearest",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def nontemporal_method(request):
"""Fixture that returns an (method name, required kwargs) pair.
This fixture does not include method 'time' as a parameterization; that
method requires a Series with a DatetimeIndex, and is generally tested
separately from these non-temporal methods.
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
@pytest.fixture(
params=[
"linear",
"slinear",
"zero",
"quadratic",
"cubic",
"barycentric",
"krogh",
"polynomial",
"spline",
"piecewise_polynomial",
"from_derivatives",
"pchip",
"akima",
"cubicspline",
]
)
def interp_methods_ind(request):
"""Fixture that returns a (method name, required kwargs) pair to
be tested for various Index types.
This fixture does not include methods - 'time', 'index', 'nearest',
'values' as a parameterization
"""
method = request.param
kwargs = {"order": 1} if method in ("spline", "polynomial") else {}
return method, kwargs
class TestSeriesInterpolateData:
def test_interpolate(self, datetime_series, string_series):
ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method="linear")
tm.assert_series_equal(linear_interp, ts)
ord_ts = Series(
[d.toordinal() for d in datetime_series.index], index=datetime_series.index
).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method="time")
tm.assert_series_equal(time_interp, ord_ts)
def test_interpolate_time_raises_for_non_timeseries(self):
# When method='time' is used on a non-TimeSeries that contains a null
# value, a ValueError should be raised.
non_ts = Series([0, 1, 2, np.NaN])
msg = "time-weighted interpolation only works on Series.* with a DatetimeIndex"
with pytest.raises(ValueError, match=msg):
non_ts.interpolate(method="time")
@td.skip_if_no_scipy
def test_interpolate_cubicspline(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
result = ser.reindex(new_index).interpolate(method="cubicspline")[1:3]
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_interpolate_pchip(self):
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(
Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
).astype(float)
interp_s = ser.reindex(new_index).interpolate(method="pchip")
# does not blow up, GH5977
interp_s[49:51]
@td.skip_if_no_scipy
def test_interpolate_akima(self):
ser = Series([10, 11, 12, 13])
# interpolate at new_index where `der` is zero
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima")
tm.assert_series_equal(interp_s[1:3], expected)
# interpolate at new_index where `der` is a non-zero int
expected = Series(
[11.0, 1.0, 1.0, 1.0, 12.0, 1.0, 1.0, 1.0, 13.0],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="akima", der=1)
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_piecewise_polynomial(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="piecewise_polynomial")
tm.assert_series_equal(interp_s[1:3], expected)
@td.skip_if_no_scipy
def test_interpolate_from_derivatives(self):
ser = Series([10, 11, 12, 13])
expected = Series(
[11.00, 11.25, 11.50, 11.75, 12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]),
)
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75])).astype(
float
)
interp_s = ser.reindex(new_index).interpolate(method="from_derivatives")
tm.assert_series_equal(interp_s[1:3], expected)
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_interpolate_corners(self, kwargs):
s = Series([np.nan, np.nan])
tm.assert_series_equal(s.interpolate(**kwargs), s)
s = Series([], dtype=object).interpolate()
tm.assert_series_equal(s.interpolate(**kwargs), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method="index")
expected = s.copy()
bad = isna(expected.values)
good = ~bad
expected = Series(
np.interp(vals[bad], vals[good], s.values[good]), index=s.index[bad]
)
tm.assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method="values")
tm.assert_series_equal(other_result, result)
tm.assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
msg = (
"time-weighted interpolation only works on Series or DataFrames "
"with a DatetimeIndex"
)
with pytest.raises(ValueError, match=msg):
s.interpolate(method="time")
@pytest.mark.parametrize(
"kwargs",
[
{},
pytest.param(
{"method": "polynomial", "order": 1}, marks=td.skip_if_no_scipy
),
],
)
def test_nan_interpolate(self, kwargs):
s = Series([0, 1, np.nan, 3])
result = s.interpolate(**kwargs)
expected = Series([0.0, 1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1.0, 2.0, 3.0, 4.0], index=[1, 3, 5, 9])
tm.assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list("abcd"))
result = s.interpolate()
expected = Series([0.0, 1.0, 2.0, 2.0], index=list("abcd"))
tm.assert_series_equal(result, expected)
# API: pandas._testing.assert_series_equal
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import torch
import seaborn as sns
from mpl_toolkits import axes_grid1
def _im_plot(
m,
figsize,
cmap,
xlabels,
ylabels,
path,
highlight_significant=False,
vmin=None,
vmax=None,
):
fig = plt.figure(figsize=figsize)
plt.clf()
ax = fig.gca()
res = ax.imshow(
np.array(m), cmap=cmap, interpolation="nearest", vmin=vmin, vmax=vmax
)
_add_colorbar(res)
height, width = m.shape
plt.xticks(range(width), xlabels, rotation=90)
plt.yticks(range(height), ylabels, rotation=0)
plt.tight_layout()
if highlight_significant:
# highlighting values 1 std away from mean
significant = np.array(
((m - m.mean(dim=0)).abs() > m.std(dim=0))
& ((m - m.mean(dim=0)).abs() > 0.2)
)
for x in range(width):
for y in range(height):
if significant[y][x]:
ax.annotate(
str("O"),
xy=(x, y),
horizontalalignment="center",
verticalalignment="center",
)
plt.savefig(path, format="png")
def write_mixing_matrix(process):
m = process.mixing_matrix
client_names = [p.name for p in process.participants]
_im_plot(
m=m,
figsize=(8, 8),
cmap="BuGn",
xlabels=client_names,
ylabels=client_names,
path=_get_path(process, "mixing_matrix"),
)
def _get_layers_as_dict(process):
local_weight_names = [
x
for x in process.participants[0]._model.state_dict().keys()
if x in process.config["local_layers"]
]
images = {}
for weight_name in local_weight_names:
list_of_images = []
for p in process.participants:
list_of_images.append(p._model.state_dict()[weight_name].cpu())
img = torch.stack(list_of_images, dim=0).flatten(1)
images[weight_name] = img
return images
def _write_as_csv(weight_name, img, client_names, feature_names, logdir):
df = pd.DataFrame(img.cpu().numpy(), columns=feature_names, index=client_names)
df.to_csv(logdir + str(weight_name) + ".csv")
def write_local_weights(process):
images = _get_layers_as_dict(process)
for weight_name in images.keys():
img = images[weight_name]
if weight_name.endswith("_w"): # multiplicative
vmin = -1
vmax = 3
cmap = "PuOr"
else: # additive
vmin = -1
vmax = 1
cmap = "PiYG"
height, width = img.shape
img_width = (width / 3) + 4
img_height = (height / 3) + 3
# axis labels
client_names = [p.name for p in process.participants]
feature_names = range(width)
if width == len(process.fl_dataset.feature_names):
feature_names = process.fl_dataset.feature_names
_write_as_csv(
weight_name, img, client_names, feature_names, process.config["logdir"]
)
_im_plot(
m=img,
figsize=(img_width, img_height),
cmap=cmap,
xlabels=feature_names,
ylabels=client_names,
path=_get_path(process, weight_name),
highlight_significant=True,
vmin=vmin,
vmax=vmax,
)
def _get_path(process, name):
return (
process.config["logdir"]
+ process.config["experiment_name"]
+ "-"
+ process.config["run_name"]
+ "-"
+ name
+ ".png"
)
def _add_colorbar(im, aspect=20, pad_fraction=0.5, **kwargs):
"""Add a vertical color bar to an image plot."""
divider = axes_grid1.make_axes_locatable(im.axes)
width = axes_grid1.axes_size.AxesY(im.axes, aspect=1.0 / aspect)
pad = axes_grid1.axes_size.Fraction(pad_fraction, width)
current_ax = plt.gca()
cax = divider.append_axes("right", size=width, pad=pad)
plt.sca(current_ax)
return im.axes.figure.colorbar(im, cax=cax, **kwargs)
def visualize_features(process):
input_images = _get_layers_as_dict(process)
bias = input_images["feature_b"]
mult = input_images["feature_w"]
combined_df = None
for i, train_data in enumerate(process.fl_dataset.fl_train_datasets):
original = pd.DataFrame(
train_data.x.numpy(), columns=process.fl_dataset.feature_names,
)
melted = original.melt(var_name="feature")
melted["value"] = melted["value"] + np.random.normal(
0.0, 0.01, melted["value"].shape
)
melted["name"] = process.fl_dataset.dataset_names[i]
melted["type"] = "original"
affine = (train_data.x.numpy() * mult[i].numpy()) + bias[i].numpy()
affine = pd.DataFrame(affine, columns=process.fl_dataset.feature_names,)
melted_affine = affine.melt(var_name="feature")
melted_affine["value"] = melted_affine["value"] + np.random.normal(
0.0, 0.01, melted_affine["value"].shape
)
melted_affine["name"] = process.fl_dataset.dataset_names[i]
melted_affine["type"] = "transformed"
combined = pd.concat([melted, melted_affine], axis=0)
if combined_df is None:
combined_df = combined
else:
combined_df = pd.concat([combined_df, combined], axis=0)
plt.clf()
sns.displot(
combined_df,
kind="kde", # hist
x="value",
col="feature",
row="name",
hue="type",
multiple="layer", # fill, layer, stack, dodge
height=2.5,
aspect=1.0,
bw_adjust=0.1,
facet_kws={"sharex": "col", "sharey": False, "margin_titles": True},
)
plt.savefig(
_get_path(process, "input-shift"), format="png",
)
def write_n_samples(process):
client_names = [p.name for p in process.participants]
n_train_samples = [
p.dataset_loader.train_loader.n_samples for p in process.participants
]
n_test_samples = [
p.dataset_loader.test_loader.n_samples for p in process.participants
]
train = pd.DataFrame(n_train_samples, index=client_names, columns=["value"])
# API: pandas.DataFrame