import os
import numpy as np
import pandas as pd
from sklearn import linear_model
def allign_alleles(df):
"""Look for reversed alleles and inverts the z-score for one of them.
Here, we take advantage of numpy's vectorized functions for performance.
"""
d = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
a = [] # array of alleles
for colname in ['A1_ref', 'A2_ref', 'A1_gen', 'A2_gen', 'A1_y', 'A2_y']:
tmp = np.empty(len(df[colname]), dtype=int)
for k, v in d.items():
tmp[np.array(df[colname]) == k] = v
a.append(tmp)
matched_alleles_gen = (((a[0] == a[2]) & (a[1] == a[3])) |
((a[0] == 3 - a[2]) & (a[1] == 3 - a[3])))
reversed_alleles_gen = (((a[0] == a[3]) & (a[1] == a[2])) |
((a[0] == 3 - a[3]) & (a[1] == 3 - a[2])))
matched_alleles_y = (((a[0] == a[4]) & (a[1] == a[5])) |
((a[0] == 3 - a[4]) & (a[1] == 3 - a[5])))
reversed_alleles_y = (((a[0] == a[5]) & (a[1] == a[4])) |
((a[0] == 3 - a[5]) & (a[1] == 3 - a[4])))
df['Z_y'] *= -2 * reversed_alleles_y + 1
df['reversed'] = reversed_alleles_gen
    # keep only SNPs whose alleles are consistent across all three sources and
    # return the filtered frame so the caller actually sees the effect
    return df[(matched_alleles_y | reversed_alleles_y) & (matched_alleles_gen | reversed_alleles_gen)]
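# Illustration (not part of the original tool): with the mapping
# {'A': 0, 'C': 1, 'G': 2, 'T': 3}, the complement of a base code c is 3 - c
# (A <-> T, C <-> G), which is why strand flips are detected above by comparing
# one allele array against ``3 -`` the other.
def _demo_complement_trick():
    codes = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    bases = np.array([codes[b] for b in ['A', 'C', 'G', 'T']])  # -> [0, 1, 2, 3]
    return 3 - bases  # -> [3, 2, 1, 0], i.e. the codes of ['T', 'G', 'C', 'A']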
def get_files(file_name, chr):
if '@' in file_name:
valid_files = []
if chr is None:
for i in range(1, 23):
cur_file = file_name.replace('@', str(i))
if os.path.isfile(cur_file):
valid_files.append(cur_file)
else:
raise ValueError('No file matching {} for chr {}'.format(
file_name, i))
else:
cur_file = file_name.replace('@', chr)
if os.path.isfile(cur_file):
valid_files.append(cur_file)
else:
raise ValueError('No file matching {} for chr {}'.format(
file_name, chr))
return valid_files
else:
if os.path.isfile(file_name):
return [file_name]
else:
            raise ValueError('No files matching {}'.format(file_name))
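# Hypothetical usage sketch (temporary files, not part of the original pipeline):
# an '@' in the pattern is expanded once per chromosome, and each expanded path
# must exist on disk, otherwise a ValueError is raised.
def _demo_get_files():
    import tempfile
    tmpdir = tempfile.mkdtemp()
    pattern = os.path.join(tmpdir, 'chr@.bim')
    for i in range(1, 23):
        open(pattern.replace('@', str(i)), 'w').close()  # create empty chr1..chr22 files
    return get_files(pattern, None)  # -> 22 paths, one per autosome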
def prep(bfile, genotype, sumstats2, N2, phenotype, covariates, chr, start, end):
bim_files = get_files(bfile + '.bim', chr)
genotype_files = get_files(genotype + '.bim', chr)
# read in bim files
bims = [pd.read_csv(f,
header=None,
names=['CHR', 'SNP', 'CM', 'BP', 'A1', 'A2'],
delim_whitespace=True) for f in bim_files]
bim = pd.concat(bims, ignore_index=True)
genotype_bims = [pd.read_csv(f,
header=None,
names=['CHR', 'SNP', 'CM', 'BP', 'A1', 'A2'],
delim_whitespace=True) for f in genotype_files]
genotype_bim = pd.concat(genotype_bims, ignore_index=True)
if chr is not None:
if start is None:
start = 0
if end is None:
end = float('inf')
        genotype_bim = genotype_bim[
            (genotype_bim['CHR'] == chr) & (genotype_bim['BP'] >= start) & (genotype_bim['BP'] <= end)
        ].reset_index(drop=True)
        bim = bim[
            (bim['CHR'] == chr) & (bim['BP'] >= start) & (bim['BP'] <= end)
        ].reset_index(drop=True)
summary_stats = pd.read_csv(sumstats2, delim_whitespace=True)
# rename cols
bim.rename(columns={'CHR': 'CHR_ref', 'CM': 'CM_ref', 'BP':'BP_ref', 'A1': 'A1_ref', 'A2': 'A2_ref'}, inplace=True)
genotype_bim.rename(columns={'CHR': 'CHR_gen', 'CM': 'CM_gen', 'BP':'BP_gen', 'A1': 'A1_gen', 'A2': 'A2_gen'}, inplace=True)
summary_stats.rename(columns={'A1': 'A1_y', 'A2': 'A2_y', 'N': 'N_y', 'Z': 'Z_y'}, inplace=True)
# take overlap between output and ref genotype files
    df = pd.merge(bim, genotype_bim, on=['SNP'])
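# Toy illustration (hypothetical data, not from the original pipeline): merging
# on 'SNP' keeps only variants present in both .bim files, which is the
# "overlap" referred to in the comment above.
def _demo_snp_overlap():
    ref = pd.DataFrame({'SNP': ['rs1', 'rs2', 'rs3'], 'A1_ref': ['A', 'C', 'G']})
    gen = pd.DataFrame({'SNP': ['rs2', 'rs3', 'rs4'], 'A1_gen': ['C', 'G', 'T']})
    return pd.merge(ref, gen, on=['SNP'])  # rows for rs2 and rs3 only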
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import assetallocation_arp.models.ARP as arp
# Parameters
TIMES_LAG = 3
settings = arp.dataimport_settings("Settings")
# Change the universe of markets that is being used
markets = "Leverage_MATR"  # All: "Leverage_all_markets" / Minimalist: "Leverage_min"
# Leverage/scaling of individual markets
sleverage = "v"  # Equal (e) / Normative (n) / Volatility (v) / Standalone (s)
def signal(index):
    sig = pd.DataFrame()
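# Illustrative sketch only (hypothetical helper, not the ARP module's actual
# signal logic): a lag of TIMES_LAG periods could be applied to a per-market
# signal frame with pandas' shift.
def _demo_lagged_signal(prices):
    returns = prices.pct_change()    # simple per-market returns as a stand-in signal
    return returns.shift(TIMES_LAG)  # delay the signal by TIMES_LAG periods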
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
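def _demo_assert_same_tuple():
    # Minimal illustration (not part of the original test suite): NaN never
    # compares equal to itself, so a plain element-wise ``==`` would reject
    # tuples that differ only by having NaN in the same slot; the extra
    # ``np.isnan(...) and np.isnan(...)`` clause above accepts them.
    assert float('nan') != float('nan')
    assert_same_tuple((1.0, float('nan')), (1.0, float('nan')))  # passes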
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
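# Sketch (my reading of the grouped layout, not an assertion taken from the
# test suite): with group_lens = [1, 2, 3, 4] the ten columns are expected to
# be numbered within their group, so each row of the default call sequence
# should look like [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]; the reversed variant counts
# down within each group instead.
def _demo_default_call_seq():
    return nb.build_call_seq_nb((2, 10), np.array([1, 2, 3, 4]), CallSeqType.Default)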
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
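# With call_seq='auto' and cash sharing, the columns in each row are called so that sell orders
# run before buys, letting the released cash fund the rotation of the full position across columns
# (see the expected call_seq matrices below).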
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
def test_auto_seq(self):
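# With call_seq='auto' and cash sharing, the realized per-column asset value should match the
# requested target_hold_value at every bar, whether sized by target value or target percent.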
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
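# Shared boolean signal fixtures: entries fire on the first three bars and exits on the last three,
# so the wrappers below open a position at bar 0 and close (or reverse) it at bar 3.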
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
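# Thin wrappers around Portfolio.from_signals that differ only in the `direction` argument,
# mirroring the from_orders helpers above.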
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
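# Long/short-signal variants: instead of `direction`, these pass the long and short entry/exit
# arrays positionally to Portfolio.from_signals.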
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_custom_signal_func(self):
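# signal_func_nb translates the broadcast numeric arrays into the four signal flags
# (long entry/exit, short entry/exit); the resulting portfolio must match pf_base,
# which is built from the equivalent explicit boolean signals.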
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
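# Percent sizing cannot reverse a position directly, so direction='both' raises unless opposite
# entries merely close (or reduce) the existing position.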
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
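# val_price=np.inf should value orders at the current close, while -np.inf should use the previous
# valuation price; each case is checked against passing the equivalent price series explicitly.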
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
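# Same 'auto' call-sequence check as in TestFromOrders: the signals rotate the whole position
# across the three columns, and sells must be called before buys so the shared cash can be reused.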
kwargs = dict(
close=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_signals_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 1, 0],
[1, 0, 2]
])
)
pf = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100.0, 1.0, 0.0, 0), (1, 2, 1, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 1, 2, 100.0, 1.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 0, 3, 100.0, 1.0, 0.0, 1),
(6, 2, 3, 100.0, 1.0, 0.0, 0), (7, 2, 4, 100.0, 1.0, 0.0, 1), (8, 1, 4, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
def test_sl_stop(self):
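# First scenario: close falls from 5 to 1, so a 10% stop exits on the first down bar, a 50% stop
# at close=2, and np.nan/np.inf never trigger; the rising-close scenario afterwards mirrors this
# for short positions.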
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.0, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 3, 20.0, 2.0, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.25, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 1, 20.0, 4.25, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0), (6, 3, 1, 20.0, 4.0, 0.0, 1),
(7, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1),
(4, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 2.0, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 3, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0),
(4, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.75, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 1, 100.0, 1.75, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1), (6, 3, 1, 100.0, 2.0, 0.0, 0),
(7, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_ts_stop(self):
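# Trailing stop (sl_trail=True): the stop level follows the running peak, so after close rallies
# from 4 to 5 a 10% trail exits on the pullback to 4, while a 50% trail only exits at close=2.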
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1, sl_trail=True)
close = pd.Series([4., 5., 4., 3., 2.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.0, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 4, 25.0, 2.0, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.0, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1),
(4, 3, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.25, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 2, 25.0, 4.25, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0), (6, 3, 2, 25.0, 4.125, 0.0, 1),
(7, 4, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.25, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1), (4, 2, 1, 25.0, 5.25, 0.0, 0),
(5, 3, 0, 25.0, 4.0, 0.0, 1), (6, 3, 1, 25.0, 5.25, 0.0, 0),
(7, 4, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([2., 1., 2., 3., 4.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 1.0, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0),
(4, 3, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 2.0, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 4, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 0.75, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0), (4, 2, 1, 50.0, 0.5, 0.0, 1),
(5, 3, 0, 50.0, 2.0, 0.0, 0),
(6, 4, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 1.75, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 2, 50.0, 1.75, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1), (6, 3, 2, 50.0, 1.75, 0.0, 0),
(7, 4, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_tp_stop(self):
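# Take-profit mirrors the stop-loss cases: on the falling close only shorts hit their targets,
# on the rising close only longs do, and np.nan/np.inf never trigger.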
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(tp_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.0, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 3, 20.0, 2.0, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0),
(4, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.25, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 1, 20.0, 4.25, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1), (6, 3, 1, 20.0, 4.0, 0.0, 0),
(7, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 2.0, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 3, 100.0, 4.0, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 1.75, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 1, 100.0, 1.75, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0), (6, 3, 1, 100.0, 2.0, 0.0, 1),
(7, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1),
(4, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_stop_entry_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='val_price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.625, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.75, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='fillprice',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 3.0250000000000004, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 3, 16.52892561983471, 1.5125000000000002, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='close',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.5, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
def test_stop_exit_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 4.25, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.5, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stopmarket', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.825, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.25, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.125, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='close', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.6, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.7, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='price', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.9600000000000004, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.97, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9900000000000001, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_exit(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']],
accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_update(self):
entries = pd.Series([True, True, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
        close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
"""
This is the main script of the main GUI of the OXCART Atom Probe.
@author: <NAME> <<EMAIL>>
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector, EllipseSelector
from matplotlib.patches import Circle, Rectangle
import pandas as pd
from pyccapt.calibration import selectors_data, variables, data_tools
def fetch_dataset_from_dld_grp(filename:"type: string - Path to hdf5(.h5) file")->"type:list - list of dataframes":
try:
hdf5Data = data_tools.read_hdf5(filename)
dld_highVoltage = hdf5Data['dld/high_voltage']
dld_pulseVoltage = hdf5Data['dld/pulse_voltage']
dld_startCounter = hdf5Data['dld/start_counter']
dld_t = hdf5Data['dld/t']
dld_x = hdf5Data['dld/x']
dld_y = hdf5Data['dld/y']
dldGroupStorage = [dld_highVoltage, dld_pulseVoltage, dld_startCounter, dld_t, dld_x, dld_y]
return dldGroupStorage
except KeyError as error:
print("[*]Keys missing in the dataset -> ", error)
def concatenate_dataframes_of_dld_grp(dataframeList:"type:list - list of dataframes")->"type:pandas.DataFrame - concatenated dataframe":
dld_masterDataframeList = dataframeList
    dld_masterDataframe = pd.concat(dld_masterDataframeList, axis=1)
    return dld_masterDataframe
import numpy as np
import pandas as pd
from astropy.modeling import models,fitting
from astropy.io import fits
from astropy.table import Table
class OptimalExtraction:
"""
OptimalExtraction is a python class to perform optimal extraction of a spectrum from an image. Originally, the algorithm was proposed by [1]. In this adaptation, we followed [2].
#####
+ Inputs:
- image = an image in 2D array.
Assumptions:
        i) image was processed, cleaned, and calibrated in standard CCD processing steps (i.e., bias subtraction, flatfield, cosmic-ray removal/repair, and background subtracted).
        ii) image was resampled from the original image so that
            a) dispersion is along the x-axis and cross-dispersion along the y-axis. (See hstgrism.aperturedirection and hstgrism.resampling if you are working with HST images).
b) Each x column is assumed to be one wavelength bin with line spreading along y-axis.
c) Peak of each wavelength bin is aligned at the center row in the image. (See hstgrism.extractcompoundmodel1d for fitting the trace centers).
- var = 2D array of variance corresponding to the image
- bin_number = 1D array parallel to dispersion axis (i.e., x-axis) specifying bin numbers. Number 0 = skip the extraction on that column.
- bin_kernel_initial = initial extraction kernel. Only astropy.modeling.models.Gaussian1D is supported for this version.
- fitter = astropy.modeling.fitting.LevMarLSQFitter() recommended.
        - do_fit_bin = True to re-fit using the bin_kernel_initial, and the new parameters will be used in the final step of extraction. If False, bin_kernel_initial would be used for extracting in the final step.
- kernel_mode = 'Gaussian1D' only in this version.
#####
+ Compute:
- self.compute() to start the optimal extraction after properly instantiate.
i) self.bin_image = 2D array of binned image, i.e., columns with the same bin_number are added. x-axis of bin_image corresponds to bin_number.
ii) self.bin_var = 2D array of binned variance. Simply add columns of variance with the same bin_number.
iii) self.bin_kernel_fit = dict of key:bin_number, value:extraction kernel. If do_fit_bin = False, bin_kernel_fit = bin_kernel_initial.
iv) self.bin_image_kernel = 2D array of binned image using bin_kernel_fit for estimation.
v) self.kernel_fit = 1D array of extraction kernel runs parallel to dispersion axis (unbinned), and re-normalized. This is simply prepared from bin_kernel_fit and bin_numer. Re-normalization is done for each dispersion column given bin_kernel_fit, but fixed the shape profile. For example, in Gaussian1D kernel, the re-normalization fixes mean and stddev of each column, and re-fit for the amplitude.
vi) self.image_kernel = 2D array of unbinned image using kernel_fit for estimation.
vii) self.optimal1d = 1D array of optimally extracted profile
#####
+ Save:
- container = optimalextraction.container.Container
- wavelength = 1D array of wavelengths parallel to dispersion columns. (Use trace.csv if working with HSTGRISM environment).
- optimalextraction.fits = multiextension fits file (See more in astropy.io.fits)
EXT0: PrimaryHDU
EXT1: ImageHDU as self.image_kernel
EXT2: BinTableHDU with columns as bin_number (i.e., 1D array parallel to dispersion columns specifying bin numbers), and self.kernel_fit parameters (i.e., amplitude, mean, stddev for Gaussian1D kernel_mode).
- optimalextraction.csv = csv table with columns as column_index, wavelength (if specified), spectrum. Spectrum is optimally extracted.
#####
+ References:
[1] Horne 1986, 'An optimal extraction algorithm for CCD spectroscopy': https://ui.adsabs.harvard.edu/abs/1986PASP...98..609H/abstract
[2] Space Telescope Science Institute, 'NIRSpec MOS Optimal Spectral Extraction': https://github.com/spacetelescope/dat_pyinthesky/blob/master/jdat_notebooks/optimal_extraction/Spectral%20Extraction.ipynb
[3] Astropy, 'Models and Fitting': https://docs.astropy.org/en/stable/modeling/index.html
"""
def __init__(self,image,var,bin_number,bin_kernel_initial,fitter,do_fit_bin=True,kernel_mode='Gaussian1D'):
self.image = image
self.var = var
self.bin_number = bin_number
self.bin_kernel_initial = bin_kernel_initial
self.fitter = fitter
self.do_fit_bin = do_fit_bin
self.kernel_mode = kernel_mode
def compute(self):
self.bin_image = self._make_bin_image(mode='image')
self.bin_var = self._make_bin_image(mode='var')
self.bin_kernel_fit = self.bin_kernel_initial
if self.do_fit_bin:
self.bin_kernel_fit = self._fit_bin()
self.bin_image_kernel = self._compute_bin_image_kernel()
self.kernel_fit,self.image_kernel = self._compute_kernel_fit()
self.optimal1d = self._compute_optimal()
def save(self,container=None,wavelength=None):
if container is None:
raise ValueError('container must be specified. See optimalextraction.container.Container.')
#####
# bin_number,kernel_fit,image_kernel >>> optimalextraction.fits
string = './{0}/{1}_optimalextraction.fits'.format(container.data['savefolder'],container.data['saveprefix'])
if self.kernel_mode=='Gaussian1D':
amplitude,mean,stddev = [],[],[]
for ii,i in enumerate(self.kernel_fit):
amplitude.append(self.kernel_fit[ii].amplitude[0])
mean.append(self.kernel_fit[ii].mean[0])
stddev.append(self.kernel_fit[ii].stddev[0])
t = {'bin_number':self.bin_number,'amplitude':amplitude,'mean':mean,'stddev':stddev}
phdu = fits.PrimaryHDU()
ihdu = fits.ImageHDU(self.image_kernel)
bhdu = fits.BinTableHDU(Table(t))
hdul = fits.HDUList([phdu,ihdu,bhdu])
hdul.writeto(string,overwrite=True)
print('Save {0}'.format(string))
#####
# optimal1d >>> optimalextraction.csv
string = './{0}/{1}_optimalextraction.csv'.format(container.data['savefolder'],container.data['saveprefix'])
ty,ty_name = self.optimal1d,'spectrum'
tx,tx_name = np.arange(len(ty)),'column_index'
t = {tx_name:tx, ty_name:ty}
if wavelength is not None:
tw,tw_name = wavelength,'wavelength'
t = {tx_name:tx, tw_name:tw, ty_name:ty}
        pd.DataFrame(t).to_csv(string,index=False)
        print('Save {0}'.format(string))
import requests
import pandas as pd
import world_bank_data as wb
import lxml
def wb_corr(data, col, indicator, change=False):
    orig_value = pd.options.mode.chained_assignment # Remember the current setting so it can be restored at the end of the function
    pd.options.mode.chained_assignment = None # Change option within function to avoid warning of value being placed on a copy of a slice.
"""
Returns the relationship that an input variable has with a chosen variable or chosen variables from the World Bank data, sorted by the strength of relationship
Relationship can be either the correlation between the input variable and the chosen indicator(s) or the correlation in the annual percent changes
Parameters
----------
data: A pandas dataframe that contains a column of countries called "Country," a column of years called "Year," and a column of data for a variable
col: The integer index of the column in which the data of your variable exists in your dataframe
indicator: The indicator or list of indicators to check the relationship with the input variable. Can be a character string of the indicator ID or a list
of character strings. Indicator IDs can be found through use of the World Bank APIs
change: A Boolean value. When set to True, the correlation between the annual percent change of the input variable and the annual percent change of
chosen indicator(s) will be found and used to order the strength of relationships
Returns
----------
Pandas DataFrame
A Pandas DataFrame containing the indicator names as the index and the correlation between the indicator and the input variable. If change set to True,
another column including the correlation between the annual percent changes of the variables will be included. The DataFrame is ordered on the
correlation if change is set to False and on the correlation of percent changes if change is set to True.
The number of rows in the DataFrame will correspond to the number of indicators that were requested. The number of columns will be 1 if change is
set to False and 2 if change is True.
Examples
----------
>>> import ____
>>> wb_corr(my_df, 2, '3.0.Gini') #where my_df has columns Country, Year, Data
|Indicator | Correlation | n
--------------------------------------
|Gini Coefficient| -0.955466 | 172
>>> wb_corr(wb.get_series('SP.POP.TOTL',mrv=50).reset_index,3,['3.0.Gini','1.0.HCount.1.90usd'],True) # To compare one WB indicator with others
| Indicator | Correlation | n | Correlation_change | n_change
----------------------------------------------------------------------------------------
| Poverty Headcount ($1.90 a day)| -0.001202 |172 | 0.065375 | 134
| Gini Coefficient | 0.252892 |172 | 0.000300 | 134
"""
assert type(indicator)==str or type(indicator)==list, "indicator must be either a string or a list of strings"
assert type(col)==int, "col must be the integer index of the column containing data on the variable of interest"
assert 'Country' in data.columns, "data must have a column containing countries called 'Country'"
assert 'Year' in data.columns, "Data must have a column containing years called 'Year'"
assert col<data.shape[1], "col must be a column index belonging to data"
assert type(change)==bool, "change must be a Boolean value (True or False)"
cors=[]
indicators=[]
n=[]
if type(indicator)==str:
assert indicator in list(pd.read_xml(requests.get('http://api.worldbank.org/v2/indicator?per_page=21000').content)['id']), "indicator must be the id of an indicator in the World Bank Data. Indicators can be found using the World Bank APIs. http://api.worldbank.org/v2/indicator?per_page=21000 to see all indicators or http://api.worldbank.org/v2/topic/_/indicator? to see indicators under a chosen topic (replace _ with integer 1-21)"
thing=pd.DataFrame(wb.get_series(indicator,mrv=50)) # Create a Pandas DataFrame with the data on the chosen indicator using the world_bank_data package
merged=pd.merge(data,thing,how='inner',on=['Country','Year'])
cors.append(merged.iloc[:,col].corr(merged.iloc[:,(merged.shape[1]-1)]))
indicators.append(pd.DataFrame(wb.get_series(indicator,mrv=1)).reset_index()['Series'][0])
n.append(len(merged[merged.iloc[:,col].notnull() & merged.iloc[:,(merged.shape[1]-1)].notnull()]))
if change==False:
return pd.DataFrame(list(zip(indicators,cors,n)),columns=['Indicator','Correlation','n']).sort_values(by='Correlation',key=abs,ascending=False).set_index('Indicator')
if change==True:
mumbo=pd.DataFrame() #Create an empty dataframe to include the annual percent change data for the input variable
cors_change=[]
n_change=[]
for country in data['Country'].unique():
s=data[data['Country']==country]
s.loc[:,'lag_dat']=s.iloc[:,col].shift(-1) # Generates warning message if option is not changed above
s.loc[:,'pct_chg_dat']=(((s.iloc[:,col]-s['lag_dat'])/s['lag_dat'])*100)
mumbo=pd.concat([mumbo,s])
t=thing.reset_index()
jumbo=pd.DataFrame() #Empty dataframe to contain the percent change data for World Bank data
for country in t['Country'].unique():
y=t[t['Country']==country]
y.loc[:,'lag_ind']=y.iloc[:,3].shift(-1) # Generates warning message if pandas option is not changed above
y.loc[:,'pct_chg_ind']=(((y.iloc[:,3]-y['lag_ind'])/y['lag_ind'])*100)
jumbo=pd.concat([jumbo,y])
merged_pct=pd.merge(mumbo,jumbo,how='left',on=['Country','Year']) #inner?
cors_change.append(merged_pct.loc[:,'pct_chg_dat'].corr(merged_pct.loc[:,'pct_chg_ind']))
n_change.append(len(merged_pct[merged_pct.loc[:,'pct_chg_dat'].notnull() & merged_pct.loc[:,'pct_chg_ind'].notnull()]))
return pd.DataFrame(list(zip(indicators,cors,n,cors_change,n_change)),columns=['Indicator','Correlation','n','Correlation_change','n_change']).sort_values(by='Correlation',key=abs,ascending=False).set_index('Indicator')
if type(indicator)==list:
for indic in indicator:
assert type(indic)==str, "Elements of indicator must be strings"
assert indic in list(pd.read_xml(requests.get('http://api.worldbank.org/v2/indicator?per_page=21000').content)['id']), "indicator must be the id of an indicator in the World Bank Data. Indicators can be found using the World Bank APIs. http://api.worldbank.org/v2/indicator?per_page=21000 to see all indicators or http://api.worldbank.org/v2/topic/_/indicator? to see indicators under a chosen topic (replace _ with integer 1-21)"
for i in range(0,len(indicator)):
thing=pd.DataFrame(wb.get_series(indicator[i],mrv=50)).reset_index() # Create a Pandas DataFrame with the data on the chosen indicator using the world_bank_data package
merged=pd.merge(data,thing,how='inner',on=['Country','Year'])
cors.append(merged.iloc[:,col].corr(merged.iloc[:,(merged.shape[1]-1)]))
indicators.append(pd.DataFrame(wb.get_series(indicator[i],mrv=1)).reset_index()['Series'][0])
n.append(len(merged[merged.iloc[:,col].notnull() & merged.iloc[:,(merged.shape[1]-1)].notnull()]))
if change==False:
return pd.DataFrame(list(zip(indicators,cors,n)),columns=['Indicator','Correlation','n']).sort_values(by='Correlation',key=abs,ascending=False).set_index('Indicator')
if change==True:
cors_change=[]
n_change=[]
for i in range(0,len(indicator)):
mumbo=pd.DataFrame() # Create an empty dataframe to include the annual percent change data for the input variable
jumbo=pd.DataFrame() # Empty dataframe to contain the percent change data for World Bank data
thing=pd.DataFrame(wb.get_series(indicator[i],mrv=50)).reset_index()
for country in data['Country'].unique():
s=data[data['Country']==country]
s.loc[:,'lag_dat']=s.iloc[:,col].shift(-1) # Generates warning message if pandas option is not changed above
s.loc[:,'pct_chg_dat']=(((s.iloc[:,col]-s['lag_dat'])/s['lag_dat'])*100)
mumbo=pd.concat([mumbo,s])
for country in thing['Country'].unique():
y=thing[thing['Country']==country]
y.loc[:,'lag_ind']=y.iloc[:,3].shift(-1) # Generates warning message if pandas option is not changed above
y.loc[:,'pct_chg_ind']=(((y.iloc[:,3]-y['lag_ind'])/y['lag_ind'])*100)
jumbo=pd.concat([jumbo,y])
merged_pct=pd.merge(mumbo,jumbo,how='left',on=['Country','Year'])
cors_change.append(merged_pct.loc[:,'pct_chg_dat'].corr(merged_pct.loc[:,'pct_chg_ind']))
n_change.append(len(merged_pct[merged_pct.loc[:,'pct_chg_dat'].notnull() & merged_pct.loc[:,'pct_chg_ind'].notnull()]))
return pd.DataFrame(list(zip(indicators,cors,n,cors_change,n_change)),columns=['Indicator','Correlation','n','Correlation_change','n_change']).sort_values(by='Correlation_change',key=abs,ascending=False).set_index('Indicator')
pd.options.mode.chained_assignment = orig_value
def wb_topic_corrs(data,col,topic,k=5,change=False,nlim=1,cor_lim=0,t_lim=0):
from math import sqrt
    orig_value = pd.options.mode.chained_assignment # Remember the current setting so it can be restored at the end of the function
    pd.options.mode.chained_assignment = None # Change option within function to avoid warning of value being placed on a copy of a slice.
"""
Returns the relationship that an input variable has with the indicators in a chosen topic from the World Bank data, sorted by the strength of relationship.
Relationship can be either the correlation between the input variable and the chosen indicator(s) or the correlation in the annual percent changes
Parameters
----------
data: A pandas dataframe that contains a column of countries called "Country," a column of years called "Year," and a column of data for a variable
col: The integer index of the column in which the data of your variable exists in your dataframe
topic: A character string of the topic name or the integer corresponding to the topic. Topics can be found through the World Bank APIs
k: An integer indicating the number of variables to return. The k variables with the strongest relationships to the input variable will be returned.
change: A Boolean value. When set to True, the correlation between the annual percent change of the input variable and the annual percent change of
chosen indicator(s) will be found and used to order the strength of relationships
nlim: An integer indicating the minimum n of indicators to be reported.
cor_lim: A real number indicating the minimum absolute value of the correlation between the input variable and World Bank indicators to be reported
t_lim: A real number indicating the minimum t score of the correlation between the input variable and World Bank indicators to be reported.
Returns
----------
Pandas DataFrame
A Pandas DataFrame containing the indicator names as the index and the correlation between the indicator and the input variable. If change set to True,
another column including the correlation between the annual percent changes of the variables will be included. The DataFrame is ordered on the
correlation if change is set to False and on the correlation of percent changes if change is set to True.
The number of rows in the DataFrame will be, at most, k. The number of columns will depend on the settings of change, nlim, and t_lim.
Examples
----------
>>> import ____
>>> wb_topic_corrs(my_df,2,1) #Where my_df has columns Country, Year, Data
| Indicator | Correlation | n
------------------------------------------------------------------------------
|Access to non-solid fuel, rural (% of rural population) |0.457662 |1519
|Access to electricity, rural (% of rural population) |0.457662 |1519
|Average precipitation in depth (mm per year) |-0.442344 |353
|Annual freshwater withdrawals, agriculture (% of total f|-0.429246 |313
|Livestock production index (2014-2016 = 100) |0.393510 |1696
>>> wb_topic_corrs(wb.get_series('3.0.Gini',mrv=50).reset_index(),3,'Energy & Mining',change=True,cor_lim=.2) #To check a WB variable against its own or another topic
| Indicator | Correlation | n | Correlation_change | n_change
----------------------------------------------------------------------------------------------------------------
|Access to electricity (% of population) |-0.434674 |172 | -0.232096 | 134
|Access to electricity, urban (% of urban population) |-0.276086 |172 | -0.225105 | 134
|Electricity production from coal sources (% of total) |0.066986 |172 | 0.200032 | 62
"""
    assert type(topic)==int or type(topic)==str, "topic must be either a string or an integer corresponding to the topic. A list of topics can be found through the World Bank API: http://api.worldbank.org/v2/topic?"
assert type(col)==int, "col must be the integer index of the column containing data on the variable of interest"
assert 'Country' in data.columns, "data must have a column containing countries called 'Country'"
assert 'Year' in data.columns, "data must have a column containing years called 'Year'"
assert col<data.shape[1], "col must be a column index belonging to data"
assert type(change)==bool, "change must be a Boolean value (True or False)"
assert type(k)==int, "k must be an integer"
    assert type(nlim)==int, "nlim must be an integer"
    assert (type(cor_lim)==float or type(cor_lim)==int), "cor_lim must be a real number"
    assert (type(t_lim)==float or type(t_lim)==int), "t_lim must be a real number"
if topic=='Agriculture & Rural Development' or topic==1:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/1/indicator?per_page=50').content)
if topic=='Aid Effectiveness'or topic==2:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/2/indicator?per_page=80').content)
if topic=='Economy & Growth'or topic==3:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/3/indicator?per_page=310').content)
if topic=='Education'or topic==4:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/4/indicator?per_page=1015').content)
if topic=='Energy & Mining'or topic==5:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/5/indicator?per_page=55').content)
if topic=='Environment'or topic==6:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/6/indicator?per_page=145').content)
if topic=='Financial Sector'or topic==7:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/7/indicator?per_page=210').content)
if topic=='Health'or topic==8:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/8/indicator?per_page=651').content)
if topic=='Infrastructure'or topic==9:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/9/indicator?per_page=80').content)
if topic=='Social Protection & Labor'or topic==10:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/10/indicator?per_page=2150').content)
if topic=='Poverty'or topic==11:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/11/indicator?per_page=150').content)
if topic=='Private Sector'or topic==12:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/12/indicator?per_page=200').content)
if topic=='Public Sector'or topic==13:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/13/indicator?per_page=120').content)
if topic=='Science & Technology'or topic==14:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/14/indicator?per_page=15').content)
if topic=='Social Development'or topic==15:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/15/indicator?per_page=35').content)
if topic=='Urban Development'or topic==16:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/16/indicator?per_page=35').content)
if topic=='Gender'or topic==17:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/17/indicator?per_page=315').content)
if topic=='Millenium Development Goals'or topic==18:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/18/indicator?per_page=30').content)
if topic=='Climate Change'or topic==19:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/19/indicator?per_page=85').content)
if topic=='External Debt'or topic==20:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/20/indicator?per_page=520').content)
if topic=='Trade'or topic==21:
top_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/21/indicator?per_page=160').content)
cors=[]
indicators=[]
n=[]
t=[]
if change==False:
for i in range(0,(len(top_df['id']))):
try:
indicator=top_df.loc[i,'id']
thing=pd.DataFrame(wb.get_series(indicator,mrv=50))
except:
pass
merged=pd.merge(data,thing,how='inner',on=['Country','Year'])
cor_i=(merged.iloc[:,col].corr(merged.iloc[:,(merged.shape[1]-1)]))
cors.append(cor_i)
indicators.append(top_df['{http://www.worldbank.org}name'][i])
n_i=(len(merged[merged.iloc[:,col].notnull() & merged.iloc[:,(merged.shape[1]-1)].notnull()]))
n.append(n_i)
if cor_i==1 or cor_i==-1: # Avoid division by 0
t.append(None)
else:
t.append((cor_i*(sqrt((n_i-2)/(1-(cor_i*cor_i))))))
if t_lim==0:
almost_there = pd.DataFrame(list(zip(indicators,cors,n)),columns=['Indicator','Correlation','n']).sort_values(by='Correlation',key=abs,ascending=False).set_index('Indicator')
return almost_there.loc[(almost_there.n>nlim) & ((almost_there.Correlation>cor_lim) | (almost_there.Correlation<-cor_lim))].head(k)
if t_lim != 0:
almost_there = pd.DataFrame(list(zip(indicators,cors,n,t)),columns=['Indicator','Correlation','n','t']).sort_values(by='Correlation',key=abs,ascending=False).set_index('Indicator')
return almost_there.loc[(almost_there.n>nlim) & ((almost_there.Correlation>cor_lim) | (almost_there.Correlation<-cor_lim)) & ((almost_there.t>t_lim) | (almost_there.t<-t_lim))].head(k)
if change==True:
cors_change=[]
n_change=[]
t_change=[]
        mumbo=pd.DataFrame() # Empty dataframe to hold the annual percent change data for the input variable
for country in data['Country'].unique():
s=data[data['Country']==country]
s.loc[:,'lag_dat']=s.iloc[:,col].shift(-1) # Generates warning message if pandas option is not changed above
s.loc[:,'pct_chg_dat']=(((s.iloc[:,col]-s['lag_dat'])/s['lag_dat'])*100)
mumbo=pd.concat([mumbo,s])
for i in range(0,(len(top_df['id']))):
try:
indicator=top_df.loc[i,'id']
thing=pd.DataFrame(wb.get_series(indicator,mrv=50))
except:
pass # Some variables listed in the World Bank API have since been removed and will therefore be skipped
merged=pd.merge(data,thing,how='inner',on=['Country','Year'])
cor_i=(merged.iloc[:,col].corr(merged.iloc[:,(merged.shape[1]-1)]))
cors.append(cor_i)
n_i=len(merged[merged.iloc[:,col].notnull() & merged.iloc[:,(merged.shape[1]-1)].notnull()])
n.append(n_i)
            if cor_i==1 or cor_i==-1: # Avoid division by 0
                t.append(None)
            else:
                t.append((cor_i*(sqrt((n_i-2)/(1-(cor_i*cor_i))))))
indicators.append(top_df.loc[i,'{http://www.worldbank.org}name'])
jumbo=pd.DataFrame() #Empty dataframe to contain the percent change data for World Bank data
thing_df=thing.reset_index()
for country in thing_df['Country'].unique():
y=thing_df[thing_df['Country']==country]
y.loc[:,'lag_ind']=y.iloc[:,3].shift(-1) # Generates warning message if pandas option is not changed above
y.loc[:,'pct_chg_ind']=(((y.iloc[:,3]-y['lag_ind'])/y['lag_ind'])*100)
jumbo=pd.concat([jumbo,y])
merged_pct=pd.merge(mumbo,jumbo,how='left',on=['Country','Year'])
cor_chg_i=merged_pct.loc[:,'pct_chg_dat'].corr(merged_pct.loc[:,'pct_chg_ind'])
cors_change.append(cor_chg_i)
n_chg_i=len(merged_pct[merged_pct.loc[:,'pct_chg_dat'].notnull() & merged_pct.loc[:,'pct_chg_ind'].notnull()])
n_change.append(n_chg_i)
if (cor_chg_i==1 or cor_chg_i==-1):
t_change.append(None)
else:
t_change.append(cor_chg_i*sqrt(((n_chg_i-2)/(1-(cor_chg_i*cor_chg_i)))))
if t_lim==0:
almost_there = pd.DataFrame(list(zip(indicators,cors,n,cors_change,n_change)),columns=['Indicator','Correlation','n','Correlation_change','n_change']).sort_values(by='Correlation_change',key=abs,ascending=False).set_index('Indicator')
return almost_there.loc[(almost_there.n_change>nlim) & ((almost_there.Correlation_change>cor_lim) | (almost_there.Correlation_change<-cor_lim))].head(k)
if t_lim!=0:
almost_there = pd.DataFrame(list(zip(indicators,cors,n,t,cors_change,n_change,t_change)),columns=['Indicator','Correlation','n','t','Correlation_change','n_change','t_change']).sort_values(by='Correlation_change',key=abs,ascending=False).set_index('Indicator')
return almost_there.loc[(almost_there.n_change>nlim) & ((almost_there.Correlation_change>cor_lim) | (almost_there.Correlation_change<-cor_lim)) & ((almost_there.t_change>t_lim) | (almost_there.t_change<(-t_lim)))].head(k)
pd.options.mode.chained_assignment = orig_value
def wb_corrs_search(data,col,search,k=5,change=False,nlim=1,cor_lim=0,t_lim=0):
from math import sqrt
    orig_value = pd.options.mode.chained_assignment # Remember the current setting so it can be restored at the end of the function
    pd.options.mode.chained_assignment = None # Change option within function to avoid warning of value being placed on a copy of a slice.
"""
Returns the relationship that an input variable has with the variables from the World Bank data that match a search, sorted by the strength of relationship
Relationship can be either the correlation between the input variable and the chosen indicator(s) or the correlation in the annual percent changes
Parameters
----------
data: A pandas dataframe that contains a column of countries called "Country," a column of years called "Year," and a column of data for a variable
col: The integer index of the column in which the data of your variable exists in your dataframe
search: The search to conduct. Variables that match the given search will be identified and their relationships with the input variable found.
k: An integer indicating the number of variables to return. The k variables with the strongest relationship with the input variable will be returned.
change: A Boolean value. When set to True, the correlation between the annual percent change of the input variable and the annual percent change of
chosen indicator(s) will be found and used to order the strength of relationships
nlim: An integer indicating the minimum n of indicators to be reported.
cor_lim: A real number indicating the minimum absolute value of the correlation between the input variable and World Bank indicators to be reported
t_lim: A real number indicating the minimum t score of the correlation between the input variable and World Bank indicators to be reported.
Returns
----------
Pandas DataFrame
A Pandas DataFrame containing the indicator names as the index and the correlation between the indicator and the input variable. If change set to True,
additional columns including the correlation between the annual percent changes of the variables and the number of observations used in this calculation
will be included. The DataFrame is ordered on the correlation if change is set to False and on the correlation of percent changes if change is set to True.
The number of rows in the dataframe will be, at most, k. The number of columns will depend on the settings of change, nlim, and t_lim.
Examples
----------
>>> import ____
>>> wb_corrs_search(my_df,2,"income share") #Where my_df has columns Country, Year, Data
|Indicator | Correlation | n
---------------------------------------------------------
|Income share held by highest 10% | -0.994108 | 1741
|Income share held by highest 20% | -0.993918 | 1741
|Income share held by third 20% | 0.977071 | 1741
|Income share held by second 20% | 0.973005 | 1741
|Income Share of Fifth Quintile | -0.962370 | 160
>>> wb_corrs_search(wb.get_series('3.0.Gini',mrv=50).reset_index(),3,"income share",change=True,t_lim=.5)
|Indicator | Correlation | n | t | Correlation_change | n_change | t_change
-------------------------------------------------------------------------------------------------------------
|Income Share of Fifth Quintile | 0.991789 |160 |97.479993 |0.983675 |125 |60.623743
|Income Share of Second Quintile | -0.985993 |160 |-74.309907 |-0.925918 |125 |-27.186186
|Income Share of Third Quintile | -0.964258 |160 |-45.744148 |-0.918473 |125 |-25.756680
|Income share held by highest 20%| 0.970095 |172 |52.110510 |0.872767 |134 |20.542079
|Income share held by highest 10%| 0.952781 |172 |40.910321 |0.857376 |134 |19.138677
"""
assert type(search)==str, "search must be a character string."
assert 'Country' in data.columns, "data must have a column containing countries called 'Country'"
assert 'Year' in data.columns, "data must have a column containing years called 'Year'"
assert type(col)==int, "col must be an integer of a column index that exists in data"
assert col<data.shape[1], "col must be a column index belonging to data"
assert type(change)==bool, "change must be a Boolean value (True or False)"
assert type(k)==int, "k must be an integer"
    assert type(nlim)==int, "nlim must be an integer"
    assert (type(cor_lim)==float or type(cor_lim)==int), "cor_lim must be a real number"
    assert (type(t_lim)==float or type(t_lim)==int), "t_lim must be a real number"
inds=wb.search_indicators(search).reset_index()
cors=[]
indicators=[]
n=[]
t=[]
for indic in inds['id']:
try:
thing=pd.DataFrame(wb.get_series(indic,mrv=50))
except:
pass
merged=pd.merge(data,thing,how='left',on=['Country','Year'])
cor_i=merged.iloc[:,col].corr(merged.iloc[:,(merged.shape[1]-1)])
cors.append(cor_i)
indicators.append(pd.DataFrame(wb.get_series(indic,mrv=1)).reset_index()['Series'][0])
n_i=len(merged[merged.iloc[:,col].notnull() & merged.iloc[:,(merged.shape[1]-1)].notnull()])
n.append(n_i)
if cor_i==-1 or cor_i==1: # Avoid division by 0.
t.append(None)
else:
t.append((cor_i*(sqrt((n_i-2)/(1-(cor_i*cor_i))))))
if change==False:
if t_lim==0:
almost_there = pd.DataFrame(list(zip(indicators,cors,n)),columns=['Indicator','Correlation','n']).sort_values(by='Correlation',key=abs,ascending=False).set_index('Indicator')
return almost_there.loc[(almost_there.n>nlim) & ((almost_there.Correlation>cor_lim) | (almost_there.Correlation<-cor_lim))].head(k)
if t_lim!=0:
almost_there = pd.DataFrame(list(zip(indicators,cors,n,t)),columns=['Indicator','Correlation','n','t']).sort_values(by='Correlation',key=abs,ascending=False).set_index('Indicator')
return almost_there.loc[(almost_there.n>nlim) & ((almost_there.Correlation>cor_lim) | (almost_there.Correlation<-cor_lim)) & ((almost_there.t>t_lim) | (almost_there.t<-t_lim))].head(k)
if change==True:
cors_chg=[]
n_change=[]
t_change=[]
        mumbo=pd.DataFrame() # Empty dataframe to hold the annual percent change data for the input variable
for country in data['Country'].unique():
m=data[data['Country']==country]
m.loc[:,'lag_dat']=m.iloc[:,col].shift(-1) # Generates warning message if pandas option is not changed above
m.loc[:,'pct_chg_dat']=(((m.iloc[:,col]-m['lag_dat'])/m['lag_dat'])*100)
mumbo=pd.concat([mumbo,m])
for indic in inds['id']:
jumbo=pd.DataFrame()
thing2=pd.DataFrame(wb.get_series(indic,mrv=50)).reset_index()
for country in thing2['Country'].unique():
j=thing2[thing2['Country']==country]
j.loc[:,'lag_ind']=j.iloc[:,3].shift(-1) # Generates warning message if pandas option is not changed above
j.loc[:,'pct_chg_ind']=(((j.iloc[:,3]-j['lag_ind'])/j['lag_ind'])*100)
                jumbo=pd.concat([jumbo,j])
merged_pct=pd.merge(mumbo,jumbo,how='inner',on=['Country','Year'])
cor_chg_i=merged_pct.loc[:,'pct_chg_dat'].corr(merged_pct.loc[:,'pct_chg_ind'])
cors_chg.append(cor_chg_i)
n_chg_i=len(merged_pct[merged_pct.loc[:,'pct_chg_dat'].notnull() & merged_pct.loc[:,'pct_chg_ind'].notnull()])
n_change.append(n_chg_i)
if (cor_chg_i==1 or cor_chg_i==-1):
t_change.append(None)
else:
t_change.append(cor_chg_i*sqrt(((n_chg_i-2)/(1-(cor_chg_i*cor_chg_i)))))
if t_lim==0:
almost_there = pd.DataFrame(list(zip(indicators,cors,n,cors_chg,n_change)),columns=['Indicator','Correlation','n','Correlation_change','n_change']).sort_values(by='Correlation_change',key=abs,ascending=False).set_index('Indicator')
return almost_there.loc[(almost_there.n_change>nlim) & ((almost_there.Correlation_change>cor_lim) | (almost_there.Correlation_change<-cor_lim))].head(k)
if t_lim!=0:
almost_there = pd.DataFrame(list(zip(indicators,cors,n,t,cors_chg,n_change,t_change)),columns=['Indicator','Correlation','n','t','Correlation_change','n_change','t_change']).sort_values(by='Correlation_change',key=abs,ascending=False).set_index('Indicator')
return almost_there.loc[(almost_there.n_change>nlim) & ((almost_there.Correlation_change>cor_lim) | (almost_there.Correlation_change<-cor_lim)) & ((almost_there.t_change>t_lim) | (almost_there.t_change<-t_lim))].head(k)
pd.options.mode.chained_assignment = orig_value
def wb_every(data,col,k=5,change=False,nlim=1,cor_lim=0,t_lim=0):
pd.options.mode.chained_assignment = None # Change option within function to avoid warning of value being placed on a copy of a slice.
"""
Returns the k variables from the World Bank Dataset with the strongest relationship with an input variable, sorted by the strength of the relationship.
Relationship can be either the correlation that the input variable has with the variables from the World Bank Data or the correlation in the annual percent change of the variables.
Parameters
----------
data: A pandas dataframe that contains a column of countries called "Country," a column of years called "Year," and a column of data for a variable
col: The integer index of the column in which the data of your variable exists in your dataframe
search: The search to conduct. Variables that match the given search will be identified and their relationships with the input variable found.
k: An integer indicating the number of variables to return. The k variables with the strongest relationship with the input variable will be returned.
change: A Boolean value. When set to True, the correlation between the annual percent change of the input variable and the annual percent change of
chosen indicator(s) will be found and used to order the strength of relationships
nlim: An integer indicating the minimum n of indicators to be reported.
cor_lim: A real number indicating the minimum absolute value of the correlation between the input variable and World Bank indicators to be reported
t_lim: A real number indicating the minimum t score of the correlation between the input variable and World Bank indicators to be reported.
Returns
----------
Pandas DataFrame
A Pandas DataFrame containing the indicator names as the index and the correlation between the indicator and the input variable. If change set to True,
additional columns including the correlation between the annual percent changes of the variables and the number of observations included in this calculation
will be included. The DataFrame is ordered on the correlation if change is set to False and on the correlation of percent changes if change is set to True.
The number of rows in the dataframe will be, at most, k. The number of columns will depend on the settings of change, nlim, and t_lim.
"""
from math import sqrt
assert 'Country' in data.columns, "data must have a column containing countries called 'Country'"
assert 'Year' in data.columns, "data must have a column containing years called 'Year'"
assert type(col)==int, "col must be an integer of a column index that exists in data"
assert col<data.shape[1], "col must be a column index belonging to data"
assert type(change)==bool, "change must be a Boolean value (True or False)"
assert type(k)==int, "k must be an integer"
    assert type(nlim)==int, "nlim must be an integer"
    assert (type(cor_lim)==float or type(cor_lim)==int), "cor_lim must be a real number"
    assert (type(t_lim)==float or type(t_lim)==int), "t_lim must be a real number"
pd.options.mode.chained_assignment = None
here_we_go=pd.read_xml(requests.get('http://api.worldbank.org/v2/indicator?per_page=20100').content)
cors=[]
indicators=[]
n=[]
t=[]
for indic in here_we_go['id']:
try:
thing=pd.DataFrame(wb.get_series(indic,mrv=50)).reset_index()
except:
pass
merged=pd.merge(data,thing,how='left',on=['Country','Year'])
n_i=(len(merged[merged.iloc[:,col].notnull() & merged.iloc[:,(merged.shape[1]-1)].notnull()]))
n.append(n_i)
cor_i=merged.iloc[:,col].corr(merged.iloc[:,(merged.shape[1]-1)])
cors.append(cor_i)
if cor_i==1 or cor_i==-1: # Avoid division by 0
t.append(None)
else:
t.append((cor_i*(sqrt((n_i-2)/(1-(cor_i*cor_i))))))
indicators.append(thing.loc[0,'Series'])
if change==False:
if t_lim==0:
almost_there = pd.DataFrame(list(zip(indicators,cors,n)),columns=['Indicator','Correlation','n']).sort_values(by='Correlation',key=abs,ascending=False).set_index('Indicator')
return almost_there.loc[(almost_there.n>nlim) & ((almost_there.Correlation>cor_lim) | (almost_there.Correlation<-cor_lim))].head(k)
if t_lim != 0:
almost_there = pd.DataFrame(list(zip(indicators,cors,n,t)),columns=['Indicator','Correlation','n','t']).sort_values(by='Correlation',key=abs,ascending=False).set_index('Indicator')
return almost_there.loc[(almost_there.n>nlim) & ((almost_there.Correlation>cor_lim) | (almost_there.Correlation<-cor_lim)) & ((almost_there.t>t_lim) | (almost_there.t<-t_lim))].head(k)
if change==True:
cors_change=[]
n_change=[]
t_change=[]
        mumbo=pd.DataFrame() # Empty dataframe to hold the annual percent change data for the input variable
for country in data['Country'].unique():
s=data[data['Country']==country]
s.loc[:,'lag_dat']=s.iloc[:,col].shift(-1) # Generates warning message if pandas option is not changed above
s.loc[:,'pct_chg_dat']=(((s.iloc[:,col]-s['lag_dat'])/s['lag_dat'])*100)
mumbo=pd.concat([mumbo,s])
for indic in here_we_go['id']:
jumbo=pd.DataFrame() #Empty dataframe to contain the percent change data for World Bank data
try:
thing=pd.DataFrame(wb.get_series(indic,mrv=50)).reset_index()
except:
pass
for country in thing['Country'].unique():
                y=thing[thing['Country']==country] # Renamed from t to avoid shadowing the t-statistic list defined above
                y.loc[:,'lag_ind']=y.iloc[:,3].shift(-1) # Generates warning message if pandas option is not changed above
                y.loc[:,'pct_chg_ind']=(((y.iloc[:,3]-y['lag_ind'])/y['lag_ind'])*100)
                jumbo=pd.concat([jumbo,y])
import os
import sys
import pandas as pd
from libs.ModelFactory import ModelFactory
from measure.ModelWordMeter import ModelWordMeter
from reports.AbstractReport import AbstractReport
# Add modules to system path, needed when starting the script from the shell
# For further details see: https://stackoverflow.com/questions/16981921/relative-imports-in-python-3
PACKAGE_PARENT = '../'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
class ReportScorePerWord(AbstractReport):
"""Report the score of each model per number of words from 1 to 20.
Sample Output
-------------
ReportScorePerWord
1 2 3 4 ... 17 18 19 20
LangDetect 0.339 0.525 0.664 0.757 ... 0.983 0.986 0.984 0.985
LangDetectSpacy 0.344 0.531 0.665 0.748 ... 0.984 0.984 0.985 0.984
LangFromStopwords 0.308 0.505 0.625 0.704 ... 0.973 0.981 0.981 0.981
LangFromChars 0.391 0.538 0.692 0.768 ... 0.945 0.949 0.947 0.948
AzureTextAnalytics 0.566 0.754 0.847 0.888 ... 0.987 0.987 0.988 0.990
[5 rows x 20 columns]
Time elapsed: 3887.51 sec
Report saved to: outcome/ReportScorePerWord.csv
"""
def __init__(self):
AbstractReport.__init__(self, "ReportScorePerWord")
self.meter = ModelWordMeter()
self.csv_file = "articles_all_1k.csv"
def eval(self, all):
# init
models = ModelFactory().create(all_models=all);
col_names = list(range(1, self.meter.max_nr_of_words+1))
row_names = []
rows = []
# read test data
data = pd.read_csv(self.csv_path + self.csv_file, sep=',')
# predict scores per number of words for each model
for i, model in enumerate(models):
print("> start evaluate", model.library_name, "...")
row = self.meter.score_df(model, data)
row_names.append(model.library_name)
rows.append(row)
# create and return result
        df = pd.DataFrame(rows, row_names, col_names)
        return df
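# Illustrative usage sketch (assumes the CSV test data referenced in self.csv_file is available
# under the configured csv_path of AbstractReport):
#
#     report = ReportScorePerWord()
#     scores = report.eval(all=True)  # one row per model, one column per word count (1..20)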
""" Bout utility methods
Methods for extracting bouts from DataFrames and annotating other DataFrames
with this bout information.
A bout is a time range within a larger set of data that shares a particular feature.
"""
import pandas as pd
def extract_bouts(
df, valid, range_column="t", valid_column="valid", keep_invalid=True, by=[]
):
"""
Extract from a Pandas DataFrame a list of bouts, where each bout is indicated by a minimum and maximum
timestamp range and determined by valid ranges.
Parameters
----------
df : pandas.Dataframe
The data to extract the bouts from
valid : pandas.Series
A series of bool values indicating what rows are considered valid and invalid
range_column : str
Optional string indicating the column in df for the timestamp
valid_column : str
Optional string indicating the name in the output DataFrame indicating the validness of the bout
keep_invalid : bool
Optional value indicating whether to keep invalid bouts in the returned DataFrame
by : list
Optional list of columns to group the data by, before extracting the bouts, meaning that bout
boundaries are both determined by the valid column and the group boundaries.
Returns
-------
pandas.DataFrame
Index:
RangeIndex
Columns:
Name: count, dtype: int64
Number of rows in df belonging to the bout
Name: t_start, dtype: datetime64[ns]
Starting timestamp of the bout (t_ prefix depends on range_column)
Name: t_end, dtype: datetime64[ns]
End timestamp of the bout (t_ prefix depends on range_column)
Name: valid, dtype: bool
Whether the bout is valid according to given criterium
"""
df["_filter"] = valid.loc[df.index]
dfBool = valid != valid.shift()
dfCumsum = dfBool.cumsum()
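    # Illustrative note: for valid = [True, True, False, True], valid != valid.shift() gives
    # [True, False, True, True] and its cumsum gives [1, 1, 2, 3], so consecutive rows sharing
    # the same validity fall into the same group below.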
by_list = [dfCumsum]
for b in by:
by_list.append(b)
groups = df.groupby(by=by_list, sort=False)
bouts = groups.agg({range_column: ["count", "min", "max"], "_filter": ["first"]})
bouts.columns = [
"count",
range_column + "_start",
range_column + "_end",
valid_column,
]
df.drop(columns=["_filter"], inplace=True)
if not keep_invalid:
bouts = bouts[bouts[valid_column]]
bouts.reset_index(drop=True, inplace=True)
return bouts
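# Illustrative example (assumes a DataFrame `df` with a datetime column "t" and a numeric
# column "hr"; the 100-bpm threshold is an arbitrary placeholder):
#
#     bouts = extract_bouts(df, df["hr"] > 100, range_column="t", keep_invalid=False)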
def with_padded_bout_window(bouts, window=[0, 0], range_column="t"):
"""
Pad the values in a Pandas DataFrame created with `extract_bouts` with a time window.
Parameters
----------
bouts : pandas.Dataframe
The DataFrame containing the bouts
window : list
The number of seconds to add to the starting and end time of the bout.
range_column : str
Optional string indicating the column in original for the timestamp. This results in a prefix
in the bouts DataFrame, timestamp column 't' leads to bout columns 't_min' and 't_max'.
Returns
-------
pandas.DataFrame
A copy of the bouts DataFrame with padded min and max timestamp values
"""
bouts = bouts.copy()
bouts[range_column + "_start"] = bouts[range_column + "_start"] + pd.to_timedelta(
window[0], unit="s"
)
bouts[range_column + "_end"] = bouts[range_column + "_end"] + pd.to_timedelta(
window[1], unit="s"
)
return bouts
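# Illustrative example: widen each bout by 30 seconds on both sides (the window values are placeholders):
#
#     padded = with_padded_bout_window(bouts, window=[-30, 30], range_column="t")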
def add_bouts_as_column(
df,
bouts,
new_column="bout",
range_column="t",
valid_column="valid",
value="column",
reset_value=pd.Series(dtype="float64"),
):
"""
Applies the time ranges in a bouts DataFrame created with `extract_bouts` to the rows in another DataFrame, by
adding bout data to a new column.
Parameters
----------
df : pandas.DataFrame
The DataFrame containing the data that has to be annotated by bout information
bouts : pandas.Dataframe
The DataFrame containing the bouts
new_column : str
The optional column name to add with bout information
range_column : str
Optional string indicating the column in original for the timestamp. This results in a prefix
in the bouts DataFrame, timestamp column 't' leads to bout columns 't_start' and 't_end'.
valid_column : str
Optional string indicating the name in the output DataFrame indicating the validness of the bout
value : object
Optional value to insert for a valid bout. If 'index' it takes the bout index as bout identifier,
'column' fills in the valid column else sets the constant value given.
reset_value : object
Optional default value set to the new bouts column if it does not yet exist
Returns
-------
pandas.DataFrame
A reference to the updated df DataFrame. The original DataFrame is updated in place.
"""
if new_column not in df:
df[new_column] = reset_value
for idx, bout in bouts.iterrows():
df.loc[
(df[range_column] >= bout[range_column + "_start"])
& (df[range_column] < bout[range_column + "_end"]),
new_column,
] = (
bout[valid_column]
if value == "column"
else idx
if value == "index"
else value
)
return df
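# Illustrative example: flag rows of `df` that fall inside a bout, using each bout's validity as the value
# (the column name "in_bout" is arbitrary):
#
#     df = add_bouts_as_column(df, bouts, new_column="in_bout", range_column="t", value="column")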
def interpolate_bouts_as_column(
df,
df_values,
bouts,
new_column="bout",
range_column="t",
valid_column="valid",
value_column="position",
    reset_value=pd.Series(dtype="float64"),
#%%
import pandas as pd
import numpy as np
from datetime import datetime
import os
import pickle
# List down file paths
#dir_data = "../smoking-lvm-cleaned-data/final"
exec(open('../env_vars.py').read())
dir_data = os.environ['dir_data']
dir_picklejar = os.environ['dir_picklejar']
# Read in data
data_dates = pd.read_csv(os.path.join(os.path.realpath(dir_data), 'participant-dates.csv'))
data_selfreport = pd.read_csv(os.path.join(os.path.realpath(dir_data), 'self-report-smoking-final.csv'))
data_hq_episodes = pd.read_csv(os.path.join(os.path.realpath(dir_data), 'hq-episodes-final.csv'))
#%%
###############################################################################
# Data preparation: data_dates data frame
###############################################################################
# Create unix timestamps corresponding to 12AM of a given human-readable date
data_dates["start_date_unixts"] = (
data_dates["start_date"]
.apply(lambda x: datetime.strptime(x, "%m/%d/%Y"))
.apply(lambda x: datetime.timestamp(x))
)
data_dates["quit_date_unixts"] = (
data_dates["quit_date"]
.apply(lambda x: datetime.strptime(x, "%m/%d/%Y"))
.apply(lambda x: datetime.timestamp(x))
)
data_dates["expected_end_date_unixts"] = (
data_dates["expected_end_date"]
.apply(lambda x: datetime.strptime(x, "%m/%d/%Y"))
.apply(lambda x: datetime.timestamp(x))
)
data_dates["actual_end_date_unixts"] = (
data_dates["actual_end_date"]
.apply(lambda x: datetime.strptime(x, "%m/%d/%Y"))
.apply(lambda x: datetime.timestamp(x))
)
# More tidying up
data_dates = (
data_dates
.rename(columns={"participant": "participant_id",
"quit_date": "quit_date_hrts",
"start_date": "start_date_hrts",
"actual_end_date": "actual_end_date_hrts",
"expected_end_date": "expected_end_date_hrts"})
.loc[:, ["participant_id",
"start_date_hrts","quit_date_hrts",
"expected_end_date_hrts", "actual_end_date_hrts",
"start_date_unixts", "quit_date_unixts",
"expected_end_date_unixts","actual_end_date_unixts"]]
)
#%%
###############################################################################
# Merge data_selfreport with data_dates
###############################################################################
data_selfreport = data_dates.merge(data_selfreport,
how = 'left',
on = 'participant_id')
#%%
###############################################################################
# Data preparation: data_selfreport data frame
###############################################################################
# Drop rows with a missing 'hour' value
data_selfreport = data_selfreport.dropna(how = 'any', subset=['hour'])
def calculate_delta(message):
sr_accptresponse = ['Smoking Event(less than 5 minutes ago)',
'Smoking Event(5 - 15 minutes ago)',
'Smoking Event(15 - 30 minutes ago)',
'Smoking Event(more than 30 minutes ago)']
sr_dictionary = {'Smoking Event(less than 5 minutes ago)': 1,
'Smoking Event(5 - 15 minutes ago)': 2,
'Smoking Event(15 - 30 minutes ago)': 3,
'Smoking Event(more than 30 minutes ago)': 4}
if message in sr_accptresponse:
        # Map the self-report response to its ordinal time-bin code (1-4)
use_delta = sr_dictionary[message]
else:
        # If the response is not one of the recognized smoking-event messages,
        # treat the time the participant smoked as missing
use_delta = pd.NA
return use_delta
def round_day(raw_day):
if pd.isna(raw_day):
# Missing values for raw_day can occur
# if participant reported smoking more than 30 minutes ago
out_day = pd.NA
else:
# This takes care of the instances when participant reported to smoke
# less than 30 minutes ago
if raw_day >= 0:
# If on or after Quit Date, round down to the nearest integer
# e.g., floor(2.7)=2
out_day = np.floor(raw_day)
else:
# If before Quit Date, round up to the nearest integer
# e.g., ceil(-2.7)=-2
out_day = np.ceil(raw_day)
return out_day
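# Illustrative examples (added for clarity; not part of the original script):
#   calculate_delta('Smoking Event(less than 5 minutes ago)')  -> 1
#   calculate_delta('Smoking Event(15 - 30 minutes ago)')      -> 3
#   calculate_delta('Some unrecognized message')               -> pd.NA
#   round_day(2.7)   -> 2.0   (on/after quit date: round down)
#   round_day(-2.7)  -> -2.0  (before quit date: round up)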
#%%
data_selfreport['date'] = | pd.to_datetime(data_selfreport.date) | pandas.to_datetime |
from simulationClasses import DCChargingStations, Taxi, Bus, BatterySwappingStation
import numpy as np
import pandas as pd
from scipy import stats, integrate
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.dates import DateFormatter, HourLocator, MinuteLocator, AutoDateLocator
import seaborn as sns
import csv
import sys
from datetime import datetime,date,timedelta
import random
from math import ceil
import math
sns.set_context("paper")
sns.set(font_scale=2)
sns.set_style("whitegrid", {
"font.family": "serif",
"font.serif": ["Times", "Palatino", "serif"],
'grid.color': '.9',
'grid.linestyle': '--',
})
taxiChargingStation = DCChargingStations(5)
taxiFleet =[]
for i in range(100):
newTaxi = Taxi()
newTaxi.useSwapping = 0
taxiFleet.append(newTaxi)
busChargingStation = DCChargingStations(5)
busFleet = []
for i in range(20):
newBus = Bus()
newBus.useSwapping = 0
busFleet.append(newBus)
time = 0
taxiIncome = []
busIncome = []
taxiChargerIncome = []
busChargerIncome = []
while time < 24*60*7:
tempTaxiFleet = []
todayTaxiIncome = 0
todayBusIncome = 0
for runningTaxi in taxiFleet:
runningTaxi.decideChargeMode(time)
if runningTaxi.chargingMode == 1:
taxiChargingStation.addCharge(runningTaxi)
else:
runningTaxi.getTravelSpeed(time)
tempTaxiFleet.append(runningTaxi)
taxiFleet = tempTaxiFleet
tempChargingVehicles = []
for chargingTaxi in taxiChargingStation.chargingVehicles:
chargingTaxi.decideChargeMode(time)
if chargingTaxi.chargingMode == 0:
chargingTaxi.getTravelSpeed(time)
taxiFleet.append(chargingTaxi)
else:
chargingTaxi.charge(time,0,taxiChargingStation.chargeSpeed)
tempChargingVehicles.append(chargingTaxi)
taxiChargingStation.chargingVehicles = tempChargingVehicles
while taxiChargingStation.numberOfStations - len(taxiChargingStation.chargingVehicles) > 0:
if len(taxiChargingStation.pendingVehicles) > 0:
newChargeTaxi = taxiChargingStation.pendingVehicles.pop(0)
newChargeTaxi.charge(time,0,taxiChargingStation.chargeSpeed)
taxiChargingStation.chargingVehicles.append(newChargeTaxi)
else:
break
taxiChargingStation.charge()
tempBusFleet = []
for runningBus in busFleet:
runningBus.decideChargeMode(time)
if runningBus.chargingMode == 1:
busChargingStation.addCharge(runningBus)
else:
runningBus.getTravelSpeed(time)
tempBusFleet.append(runningBus)
busFleet = tempBusFleet
tempChargingVehicles = []
for chargingBus in busChargingStation.chargingVehicles:
chargingBus.decideChargeMode(time)
if chargingBus.chargingMode == 0:
chargingBus.getTravelSpeed(time)
busFleet.append(chargingBus)
else:
chargingBus.charge(time, 0, busChargingStation.chargeSpeed)
tempChargingVehicles.append(chargingBus)
busChargingStation.chargingVehicles = tempChargingVehicles
while busChargingStation.numberOfStations - len(busChargingStation.chargingVehicles) > 0:
if len(busChargingStation.pendingVehicles) > 0:
newChargeBus = busChargingStation.pendingVehicles.pop(0)
newChargeBus.charge(time, 0, busChargingStation.chargeSpeed)
busChargingStation.chargingVehicles.append(newChargeBus)
else:
break
busChargingStation.charge()
for taxi in taxiFleet + taxiChargingStation.chargingVehicles + taxiChargingStation.pendingVehicles:
todayTaxiIncome += taxi.income
for bus in busFleet + busChargingStation.chargingVehicles + busChargingStation.pendingVehicles:
todayBusIncome += bus.income
taxiIncome.append([time,todayTaxiIncome,len(taxiFleet),len(taxiChargingStation.chargingVehicles),len(taxiChargingStation.pendingVehicles)])
busIncome.append([time,todayBusIncome,len(busFleet),len(busChargingStation.chargingVehicles),len(busChargingStation.pendingVehicles)])
taxiChargerIncome.append([time,taxiChargingStation.income])
busChargerIncome.append([time, busChargingStation.income])
time += 1
taxiIncomeDataFrame = pd.DataFrame(taxiIncome,columns=["time","income","running","charging","waiting"])
busIncomeDataFrame = pd.DataFrame(busIncome,columns=["time","income","running","charging","waiting"])
taxiChargerIncomeDataFrame = pd.DataFrame(taxiChargerIncome,columns=["time","income"])
busChargerIncomeDataFrame = pd.DataFrame(busChargerIncome,columns=["time","income"])
plt.figure(figsize=(9, 16), dpi=1600)
ax = plt.subplot(4,1,1)
for day in range(7):
plt.axvspan(2*60 + day*60*24, 5*60 + day*60*24, facecolor='g', alpha=0.1)
plt.axvspan(18*60 + day*60*24, 21*60 + day*60*24, facecolor='r', alpha=0.1)
ax2 = plt.subplot(4,1,2)
ax3 = plt.subplot(4,1,3)
for day in range(7):
plt.axvspan(2*60 + day*60*24, 5*60 + day*60*24, facecolor='g', alpha=0.1)
plt.axvspan(18*60 + day*60*24, 21*60 + day*60*24, facecolor='r', alpha=0.1)
ax4 = plt.subplot(4,1,4)
taxiIncomeDataFrame.plot(x="time",y="income",ax=ax,label="")
taxiIncomeDataFrame.plot(x="time",y="running",ax=ax2,label="Running",style="-")
taxiIncomeDataFrame.plot(x="time",y="charging",ax=ax2,label="Charging", style=":")
taxiIncomeDataFrame.plot(x="time",y="waiting",ax=ax2,label="Waiting",style="-.")
busIncomeDataFrame.plot(x="time",y="income",ax=ax3,label="")
busIncomeDataFrame.plot(x="time",y="running",ax=ax4,label="Running",style="-")
busIncomeDataFrame.plot(x="time",y="charging",ax=ax4,label="Charging",style=":")
busIncomeDataFrame.plot(x="time",y="waiting",ax=ax4,label="Waiting",style="-.")
ax.set(ylabel= "Income ($)", xlabel='Time (min)')
ax.legend_.remove()
ax2.set(ylabel= "Number", xlabel='Time (min)')
ax3.set(ylabel= "Income ($)", xlabel='Time (min)')
ax3.legend_.remove()
ax4.set(ylabel= "Number", xlabel='Time (min)')
plt.tight_layout()
box = ax2.get_position()
ax2.set_position([box.x0, box.y0, box.width * 0.8, box.height])
box = ax4.get_position()
ax4.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax2.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax4.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig('busTaxiSimulationResult.pdf', bbox_inches='tight')
# print(taxiIncomeDataFrame)
print(busIncomeDataFrame)
#
# print(taxiIncomeDataFrame.sum())
# print(taxiIncomeDataFrame.sum()/24/7/60/100)
# print("tc1:")
# print(taxiIncomeDataFrame[(taxiIncomeDataFrame["time"]%(24*60) > 18*60) & (taxiIncomeDataFrame["time"]%(24*60) < 21*60)].sum()/60/3/7/100)
# print("tc2:")
# print(taxiIncomeDataFrame[(taxiIncomeDataFrame["time"]%(24*60) > 2*60) & (taxiIncomeDataFrame["time"]%(24*60) < 5*60)].sum()/60/3/7/100)
# print("tc3:")
# print(taxiIncomeDataFrame[(taxiIncomeDataFrame["time"]%(24*60) > 5*60) & (taxiIncomeDataFrame["time"]%(24*60) < 18*60)].sum()/60/13/7/100)
# print("tc4:")
# print(taxiIncomeDataFrame[(taxiIncomeDataFrame["time"]%(24*60) > 21*60) | (taxiIncomeDataFrame["time"]%(24*60) < 2*60)].sum()/60/5/7/100)
print(busIncomeDataFrame.sum())
print(busIncomeDataFrame.sum()/24/7/60/20)  # normalize by the 20 buses in the fleet
print("tc1:")
print(busIncomeDataFrame[(busIncomeDataFrame["time"]%(24*60) > 18*60) & (busIncomeDataFrame["time"]%(24*60) < 21*60)].sum()/60/7/20)
print("tc2:")
print(busIncomeDataFrame[(busIncomeDataFrame["time"]%(24*60) > 2*60) & (busIncomeDataFrame["time"]%(24*60) < 5*60)].sum()/60/7/20)
print("tc3:")
print(busIncomeDataFrame[(busIncomeDataFrame["time"]%(24*60) > 5*60) & (busIncomeDataFrame["time"]%(24*60) < 18*60)].sum()/60/7/20)
print("tc4:")
print(busIncomeDataFrame[(busIncomeDataFrame["time"]%(24*60) > 21*60) | (busIncomeDataFrame["time"]%(24*60) < 2*60)].sum()/60/7/20)
taxiSwappingStation = BatterySwappingStation(5, 30)
taxiFleet =[]
for i in range(100):
newTaxi = Taxi()
newTaxi.useSwapping = 1
taxiFleet.append(newTaxi)
busSwappingStation = BatterySwappingStation(5, 324)
busFleet = []
for i in range(20):
newBus = Bus()
newBus.useSwapping = 1
busFleet.append(newBus)
time = 0
taxiIncome = []
busIncome = []
taxiSwapperIncome = []
busSwapperIncome = []
swapRecord = []
while time < 24*60*7:
tempTaxiFleet = []
todayTaxiIncome = 0
todayBusIncome = 0
taxiMileage = 0
for runningTaxi in taxiFleet:
runningTaxi.decideChargeMode(time)
if runningTaxi.chargingMode == 1:
result = taxiSwappingStation.addVehicle(runningTaxi)
swapRecord.append([time, runningTaxi.remainingBatterykWh])
if result > 0:
runningTaxi.charge(time,result,0)
# print("get into queue:" + str(time))
taxiSwappingStation.swappingVehicles.append(runningTaxi)
else:
runningTaxi.getTravelSpeed(time)
tempTaxiFleet.append(runningTaxi)
taxiFleet = tempTaxiFleet
tempSwappingVehicles = []
for swappingTaxi in taxiSwappingStation.swappingVehicles:
swappingTaxi.charge(time,0,0)
if swappingTaxi.chargingMode == 0:
swappingTaxi.getTravelSpeed(time)
taxiFleet.append(swappingTaxi)
else:
tempSwappingVehicles.append(swappingTaxi)
taxiSwappingStation.swappingVehicles = tempSwappingVehicles
while len(taxiSwappingStation.pendingVehicles):
if len(taxiSwappingStation.swappingVehicles) < taxiSwappingStation.numberOfSlot:
newTaxi = taxiSwappingStation.pendingVehicles.pop(0)
result = taxiSwappingStation.swap(newTaxi.remainingBatterykWh)
newTaxi.charge(time,result,0)
# print("bump from pending to swap:" + str(time))
taxiSwappingStation.swappingVehicles.append(newTaxi)
else:
break
tempBusFleet = []
for runningBus in busFleet:
runningBus.decideChargeMode(time)
if runningBus.chargingMode == 1:
result = busSwappingStation.addVehicle(runningBus)
if result > 0:
runningBus.charge(time, result, 0)
busSwappingStation.swappingVehicles.append(runningBus)
else:
runningBus.getTravelSpeed(time)
tempBusFleet.append(runningBus)
busFleet = tempBusFleet
tempSwappingVehicles = []
for swappingBus in busSwappingStation.swappingVehicles:
swappingBus.charge(time, 0, 0)
if swappingBus.chargingMode == 0:
swappingBus.getTravelSpeed(time)
busFleet.append(swappingBus)
else:
tempSwappingVehicles.append(swappingBus)
busSwappingStation.swappingVehicles = tempSwappingVehicles
while len(busSwappingStation.pendingVehicles) > 0:
if len(busSwappingStation.swappingVehicles) < busSwappingStation.numberOfSlot:
newBus = busSwappingStation.pendingVehicles.pop(0)
result = busSwappingStation.swap(newBus.remainingBatterykWh)
newBus.charge(time, result, 0)
busSwappingStation.swappingVehicles.append(newBus)
else:
break
for taxi in taxiFleet + taxiSwappingStation.swappingVehicles + taxiSwappingStation.pendingVehicles:
todayTaxiIncome += taxi.income
for bus in busFleet + busSwappingStation.swappingVehicles + busSwappingStation.pendingVehicles:
todayBusIncome += bus.income
taxiIncome.append([time,todayTaxiIncome,len(taxiFleet),len(taxiSwappingStation.swappingVehicles),len(taxiSwappingStation.pendingVehicles),\
len(taxiFleet)+len(taxiSwappingStation.swappingVehicles)+len(taxiSwappingStation.pendingVehicles)])
busIncome.append([time,todayBusIncome,len(busFleet),len(busSwappingStation.swappingVehicles),len(busSwappingStation.pendingVehicles), \
len(busFleet) + len(busSwappingStation.swappingVehicles) + len(busSwappingStation.pendingVehicles)])
taxiSwapperIncome.append([time, taxiSwappingStation.income])
busSwapperIncome.append([time, busSwappingStation.income])
time += 1
taxiIncomeDataFrame = pd.DataFrame(taxiIncome,columns=["time","income","running","swapping","waiting","total"])
busIncomeDataFrame = | pd.DataFrame(busIncome,columns=["time","income","running","swapping","waiting","total"]) | pandas.DataFrame |
import numpy as np, pandas as pd
pd.set_option("display.precision", 4)
from sklearn.metrics import log_loss
import matplotlib.pyplot as plt
import argparse
from pathlib import Path
SUB_PATH = Path("submissions")
INPUT_PATH = Path("input")
IMG_PATH = Path("")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-test', '--test_file_name', type=str, default='gap-test.tsv',
help='Filename to read true labels from')
parser.add_argument('-result', '--result_file_name', type=str, default='result.csv',
help='Filename to write results to')
return parser.parse_args()
def read_predictions():
# BERT fine-tuning predictions
ft1 = pd.read_csv(SUB_PATH/"gap_paper_out_12802_kenkrige_results.csv").drop(columns = "ID")
ft2 = pd.read_csv(SUB_PATH/"gap_paper_out_12803_kenkrige_results.csv").drop(columns = "ID")
ft3 = pd.read_csv(SUB_PATH/"gap_paper_out_12804_kenkrige_results.csv").drop(columns = "ID")
ft4 = pd.read_csv( SUB_PATH/"gap_paper_out_6400_kenkrige_results.csv").drop(columns = "ID")
ft5 = pd.read_csv( SUB_PATH/"gap_paper_out_6402_kenkrige_results.csv").drop(columns = "ID")
ft6 = | pd.read_csv( SUB_PATH/"gap_paper_out_6404_kenkrige_results.csv") | pandas.read_csv |
# Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import numpy as np
import json
import argparse
import pandas as pd
np.random.seed(9999)
import boto3
import botocore
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def parse_args(json_file=None):
    args = {}  # default argument values; overridden by entries from the JSON file
if json_file:
with open(json_file, 'r') as f:
json_args = json.load(f)
args.update(json_args)
return Namespace(**args)
def get_aws_data(args):
session = boto3.Session(
aws_access_key_id=args.access_id,
aws_secret_access_key=args.token,
region_name='us-east-1'
)
s3 = session.resource('s3')
try:
s3.Bucket(args.bucket).download_file(args.seg_file, '/data/seg.npy')
s3.Bucket(args.bucket).download_file(args.lbl_file, '/data/syn.npy')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise
seg_data = np.load('/data/seg.npy')
syn_data = np.load('/data/syn.npy')
return seg_data, syn_data
def edge_list_cv(neurons, synapses, dilation=5, syn_thres=0.8, blob_thres=4000):
from scipy.stats import mode
import numpy as np
from skimage.measure import label
import skimage.morphology as morpho
from skimage.measure import regionprops
synapses_dil = np.zeros_like(synapses)
for z in range(0,synapses.shape[2]):
synapses_dil[:,:,z] = morpho.dilation(synapses[:,:,z], selem=morpho.disk(dilation)) # find synapse objects
synapses_dil = synapses_dil/255
print(np.max(synapses_dil))
print(np.min(synapses_dil))
    threshold = syn_thres  # use the threshold argument rather than a hard-coded value
synapses_dil,num = label((synapses_dil>threshold).astype(int),connectivity=1,background=0,return_num=True)
syn_regions = regionprops(synapses_dil)
for region in syn_regions:
if(region['area']<blob_thres):
inds = region['bbox']
synapses_dil[inds[0]:inds[3],inds[1]:inds[4],inds[2]:inds[5]] = 0
synid = np.unique(synapses_dil)
synid = synid[synid > 0]
print(len(synid))
syn_ids = []
x = []
y = []
z = []
post = []
pre = []
postNan = []
preNan = []
neusynlist = {}
synlist = {}
for s in synid:
postval = 0
preval = 0
temp = (synapses_dil == s).astype(int)
regions = regionprops(temp)
for props in regions:
x0, y0, z0 = props.centroid #poss pull should put in x,y,z
break #only get first, in case there is some weird badness
#print str(s).zfill(4),
try:
val = np.ravel(neurons[synapses_dil == s])
val = val[val > 0]
postval = mode(val)[0][0]
val = val[val != postval]
preval = mode(val)[0][0]
syn_ids.append(s)
x.append(x0)
y.append(y0)
z.append(z0)
post.append(postval)
pre.append(preval)
postNan.append(np.nan)
preNan.append(np.nan)
except:
print('skipping this id')
print('complete')
neusynlist['syn_ids'] = syn_ids
neusynlist['xs'] = x
neusynlist['ys'] = y
neusynlist['zs'] = z
neusynlist['pres'] = pre
neusynlist['posts'] = post
synlist['syn_ids'] = syn_ids
synlist['xs'] = x
synlist['ys'] = y
synlist['zs'] = z
synlist['pres'] = preNan
synlist['posts'] = postNan
return (neusynlist,synlist)
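# Illustrative usage sketch (added for clarity; not part of the original script): given a labelled
# neuron segmentation volume and a synapse volume of the same shape, edge_list_cv returns two dicts
# of parallel lists (synapse ids, centroids, pre/post partners) that can be turned into DataFrames:
#   neu_syn_list, syn_list = edge_list_cv(seg_volume, syn_volume, dilation=5,
#                                         syn_thres=0.8, blob_thres=4000)
#   edges = pd.DataFrame.from_dict(neu_syn_list)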
if __name__ == '__main__':
# -------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Seg syn association')
parser.set_defaults(func=lambda _: parser.print_help())
parser.add_argument(
'--bucket',
required=False,
help='s3 bucket'
)
parser.add_argument(
'--token',
required=False,
help='s3 bucket token'
)
parser.add_argument(
'--access_id',
required=False,
help='s3 bucket access_id'
)
parser.add_argument(
'--seg_file',
required=False,
help='Local segmentation file'
)
parser.add_argument(
'--lbl_file',
required=False,
help='Local synapse file'
)
parser.add_argument(
'-d',
'--dilation',
default=5,
required=False,
help='Dilation of synapses')
parser.add_argument(
'-t',
'--threshold',
default=0.8,
required=False,
help='Synapse threshold')
parser.add_argument(
'-b',
'--blob',
default=4000,
required=False,
help='Blob size threshold')
parser.add_argument(
'--output',
required=True,
help='Synapse frame'
)
parser.add_argument(
'--output_noneu',
required=True,
help='No neuron frame'
)
args = parser.parse_args()
seg, syn = get_aws_data(args)
threshold = 0.8
if args.threshold:
threshold = args.threshold
blob_thres = 4000
if args.blob:
blob_thres = args.blob
dilation = 5
if args.dilation:
dilation = args.dilation
    neu_syn_list,syn_list = edge_list_cv(seg, syn, dilation=dilation, syn_thres=threshold, blob_thres=blob_thres)
neu_syn_list = pd.DataFrame.from_dict(neu_syn_list)
syn_list = | pd.DataFrame.from_dict(syn_list) | pandas.DataFrame.from_dict |
#mcandrew
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.append("../")
from mods.datahelp import grabData, grabJHUData, grabDHSdata
class reportBuilder(object):
def __init__(self,gd):
self.predictions = gd.predictions()
self.qData = gd.questions()
self.quantiles = gd.quantiles()
self.metaData = gd.metaData()
def buildConsensusAndMedianPredictionText(self):
fromSurveyNumQidTXT2data = {}
for (surveyNum,qid),subset in self.quantiles.groupby(["surveynum","qid"]):
subset = subset.set_index(["quantile"])
median = subset.loc["0.5","value"]
_10thPct = subset.loc["0.1","value"]
_90thPct = subset.loc["0.9","value"]
if _10thPct > 10:
form="comma"
elif _10thPct >1:
form="1"
else:
form="2"
fromSurveyNumQidTXT2data[surveyNum,qid,"median",form] = median
fromSurveyNumQidTXT2data[surveyNum,qid,"_10",form] = _10thPct
fromSurveyNumQidTXT2data[surveyNum,qid,"_90",form] = _90thPct
self.reportDict = fromSurveyNumQidTXT2data
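    # Illustrative note (added for clarity; not part of the original code): reportDict is keyed by
    # (surveynum, qid, statistic, format), where statistic is one of "median", "_10", "_90" and
    # format is "comma", "1" or "2" depending on the magnitude of the 10th percentile.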
def addJHUdata(self,jhudata):
jhudata = jhudata.set_index(pd.to_datetime(jhudata.index))
mostRecentJhudata = jhudata.sort_index().iloc[-1,:]
dataDate = | pd.to_datetime(mostRecentJhudata.name) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import os
from os import listdir
from os.path import isfile, join, getsize, dirname
from collections import Counter, OrderedDict
import shutil
import warnings
import h5py
import pickle
import gc
import random
import time
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
path = "../../../../../../zion/OpenSNP/people"
meta = "../../../../../../zion/OpenSNP/meta"
beacons = "../../../../../zion/OpenSNP/beacon"
main_path = join(beacons, "Main2")
# In[2]:
def findUserIndex(fileName):
fileName = fileName[4:]
return int(fileName.split("_")[0])
def findFileIndex(fileName):
return int(fileName.split("_")[1][4:])
def findSkipCount(fileName):
filePath = join(path, fileName)
with open(filePath, "r") as f:
i = 0
for line in f:
if line[0] == "#" or line[0] == " ":
i += 1
else:
if line[0:6] == "rs-id":
i += 1
break
return i
def readClean(data):
# Remove X,Y,MT chromosomes
no_x_y = np.logical_and(data["chromosome"] != "X", data["chromosome"] != "Y")
data = data[np.logical_and(no_x_y, data["chromosome"] != "MT")]
data = data.fillna("NN")
data[data == "II"] = "NN"
data[data == "--"] = "NN"
data[data == "DD"] = "NN"
data[data == "DI"] = "NN"
return data.iloc[np.where(data.iloc[:, [1]] != "NN")[0]]
def readDf(file, rowSkip):
data = pd.read_csv(join(path, file), sep="\t", header=None, skiprows=rowSkip)
data.columns = ['rs_id', 'chromosome', 'position', 'allele']
del data['position']
data = data.set_index('rs_id')
data = data.rename(columns={"allele": findUserIndex(file)})
return data
def readFileComplete(fileName):
rowSkip = findSkipCount(fileName)
beacon = readDf(fileName, rowSkip)
beacon = readClean(beacon)
return beacon
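# Illustrative note (added for clarity; not part of the original script): file names are expected to
# look like "user<N>_file<M>_...", so that findUserIndex/findFileIndex can parse them, e.g.
#   beacon = readFileComplete("user123_file456_yearofbirth_1980_sex_XY.23andme.txt")  # hypothetical name
# The result is indexed by rs_id with a 'chromosome' column plus one allele column named after the
# user index (123 in this hypothetical example).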
def mergeClean(beacon):
beacon = beacon.loc[~beacon.index.duplicated(keep='first')]
beacon = beacon[ | pd.to_numeric(beacon['chr'], errors='coerce') | pandas.to_numeric |
import pandas as pd
import numpy as np
class DataParser:
@staticmethod
def _parse_companies(cmp_list):
"""
        Creates a DataFrame of companies from the list of dicts returned by the request
:param cmp_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=['ID', 'TITLE', 'CMP_TYPE_CUSTOMER', 'CMP_TYPE_PARTNER'])
if cmp_list:
cmp_df = pd.DataFrame(cmp_list)
cmp_df['CMP_TYPE_CUSTOMER'] = cmp_df['COMPANY_TYPE'].apply(lambda x: 1 if (x == 'CUSTOMER') else 0)
cmp_df['CMP_TYPE_PARTNER'] = cmp_df['COMPANY_TYPE'].apply(lambda x: 1 if (x == 'PARTNER') else 0)
cmp_df = cmp_df.drop(columns=['COMPANY_TYPE'], axis=1)
ret_df = pd.concat([ret_df, cmp_df])
return ret_df
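    # Illustrative note (added for clarity; not part of the original code): cmp_list is the raw list of
    # dicts returned by the CRM request, e.g. (hypothetical values)
    #   [{'ID': '1', 'TITLE': 'Acme Ltd', 'COMPANY_TYPE': 'CUSTOMER'}, ...]
    # and is converted into one row per company with binary CMP_TYPE_CUSTOMER / CMP_TYPE_PARTNER flags.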
@staticmethod
def _parse_deals(deal_list):
"""
        Creates a DataFrame of deals from the list of dicts returned by the request
:param deal_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=[
'OPPORTUNITY_DEAL_Q01', 'PROBABILITY_DEAL_Q01', 'TIME_DIFF_BEGIN_CLOSE_DEAL_Q01',
'OPPORTUNITY_DEAL_Q09', 'PROBABILITY_DEAL_Q09', 'TIME_DIFF_BEGIN_CLOSE_DEAL_Q09',
'OPPORTUNITY_DEAL_MEAN', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE_DEAL_MEAN', 'CLOSED',
'OPPORTUNITY_DEAL_MEDIAN', 'TIME_DIFF_BEGIN_CLOSE_DEAL_MEDIAN', 'DEAL_BY_YEAR'])
ret_df.index.name = 'COMPANY_ID'
if deal_list:
deal_df = pd.DataFrame(deal_list)
deal_df['CLOSED'] = deal_df['CLOSED'].apply(lambda x: 1 if (x == 'Y') else 0)
deal_df['OPPORTUNITY'] = pd.to_numeric(deal_df['OPPORTUNITY'])
deal_df['PROBABILITY'] = pd.to_numeric(deal_df['PROBABILITY'])
deal_df['BEGINDATE'] = pd.to_datetime(deal_df['BEGINDATE'])
deal_df['CLOSEDATE'] = pd.to_datetime(deal_df['CLOSEDATE'])
deal_df['TIME_DIFF_BEGIN_CLOSE'] = (deal_df['CLOSEDATE'] - deal_df['BEGINDATE']).astype(
'timedelta64[h]') / 24
deal_group = deal_df.groupby(by='COMPANY_ID')
deal_count = pd.DataFrame(deal_group['CLOSED'].count())
deal_date_max = deal_group['CLOSEDATE'].max()
deal_date_min = deal_group['BEGINDATE'].min()
d = {'YEAR': (deal_date_max - deal_date_min).astype('timedelta64[h]') / (24 * 365)}
deal_date_max_min_diff = pd.DataFrame(data=d)
deal_by_year = pd.DataFrame()
deal_by_year['DEAL_BY_YEAR'] = (deal_count['CLOSED'] / deal_date_max_min_diff['YEAR']).astype(np.float32)
deal_quantile01 = deal_group['OPPORTUNITY', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE'].quantile(0.1)
deal_quantile09 = deal_group['OPPORTUNITY', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE'].quantile(0.9)
deal_mean = deal_group['OPPORTUNITY', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE', 'CLOSED'].mean()
deal_median = deal_group['OPPORTUNITY', 'TIME_DIFF_BEGIN_CLOSE'].median()
deal_result = pd.merge(deal_quantile01, deal_quantile09, on='COMPANY_ID',
suffixes=['_DEAL_Q01', '_DEAL_Q09'])
deal_result1 = pd.merge(deal_mean, deal_median, on='COMPANY_ID', suffixes=['_DEAL_MEAN', '_DEAL_MEDIAN'])
deal_result = pd.merge(deal_result, deal_result1, on='COMPANY_ID')
deal_result = pd.merge(deal_result, deal_by_year, on='COMPANY_ID')
deal_result = deal_result.mask(np.isinf(deal_result))
ret_df = pd.concat([ret_df, deal_result])
return ret_df
@staticmethod
def _parse_invoices(inv_list):
"""
        Creates a DataFrame of invoices from the list of dicts returned by the request
:param inv_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=[
'PRICE_INV_Q01', 'TIME_DIFF_PAYED_BILL_INV_Q01', 'TIME_DIFF_PAYBEF_PAYED_INV_Q01',
'PRICE_INV_Q09', 'TIME_DIFF_PAYED_BILL_INV_Q09', 'TIME_DIFF_PAYBEF_PAYED_INV_Q09', 'PRICE_INV_MEAN',
'TIME_DIFF_PAYED_BILL_INV_MEAN', 'TIME_DIFF_PAYBEF_PAYED_INV_MEAN', 'PAYED', 'STATUS_ID_P',
'STATUS_ID_D', 'STATUS_ID_N', 'STATUS_ID_T', 'PRICE_INV_MEDIAN', 'TIME_DIFF_PAYED_BILL_INV_MEDIAN',
'TIME_DIFF_PAYBEF_PAYED_INV_MEDIAN', 'MONTH_TOGETHER_INV', 'DEAL_BY_YEAR'])
ret_df.index.name = 'UF_COMPANY_ID'
if inv_list:
inv_df = pd.DataFrame(inv_list)
inv_df['PRICE'] = pd.to_numeric(inv_df['PRICE'])
inv_df['DATE_BILL'] = pd.to_datetime(inv_df['DATE_BILL'])
inv_df['DATE_PAYED'] = pd.to_datetime(inv_df['DATE_PAYED'])
inv_df['DATE_PAY_BEFORE'] = pd.to_datetime(inv_df['DATE_PAY_BEFORE'])
inv_df['TIME_DIFF_PAYED_BILL'] = (inv_df['DATE_PAYED'] - inv_df['DATE_BILL']).astype('timedelta64[h]') / 24
inv_df['TIME_DIFF_PAYBEF_PAYED'] = (inv_df['DATE_PAY_BEFORE'] - inv_df['DATE_PAYED']).astype('timedelta64[h]') / 24
inv_df['PAYED'] = inv_df['PAYED'].apply(lambda x: 1 if (x == 'Y') else 0)
inv_df['STATUS_ID_P'] = inv_df['STATUS_ID'].apply(lambda x: 1 if (x == 'P') else 0)
inv_df['STATUS_ID_D'] = inv_df['STATUS_ID'].apply(lambda x: 1 if (x == 'D') else 0)
inv_df['STATUS_ID_N'] = inv_df['STATUS_ID'].apply(lambda x: 1 if (x == 'N') else 0)
inv_df['STATUS_ID_T'] = inv_df['STATUS_ID'].apply(lambda x: 1 if (x == 'T') else 0)
inv_group = inv_df.groupby(by='UF_COMPANY_ID')
inv_date_max = inv_group['DATE_PAYED'].max()
inv_date_min = inv_group['DATE_PAYED'].min()
inv_month_together = pd.DataFrame()
inv_month_together['MONTH_TOGETHER_INV'] = (inv_date_max - inv_date_min).astype('timedelta64[h]') / (
24 * 30)
inv_count = pd.DataFrame(inv_group['PAYED'].count())
inv_by_year = pd.DataFrame(
data={'DEAL_BY_YEAR': (inv_count['PAYED'] / inv_month_together['MONTH_TOGETHER_INV']) * 12})
inv_quantile01 = inv_group['PRICE', 'TIME_DIFF_PAYED_BILL', 'TIME_DIFF_PAYBEF_PAYED'].quantile(0.1)
inv_quantile09 = inv_group['PRICE', 'TIME_DIFF_PAYED_BILL', 'TIME_DIFF_PAYBEF_PAYED'].quantile(0.9)
inv_mean = inv_group['PRICE', 'TIME_DIFF_PAYED_BILL', 'TIME_DIFF_PAYBEF_PAYED', 'PAYED',
'STATUS_ID_P', 'STATUS_ID_D', 'STATUS_ID_N', 'STATUS_ID_T'].mean()
inv_median = inv_group['PRICE', 'TIME_DIFF_PAYED_BILL', 'TIME_DIFF_PAYBEF_PAYED'].median()
inv_result = pd.merge(inv_quantile01, inv_quantile09, on='UF_COMPANY_ID', suffixes=['_INV_Q01', '_INV_Q09'])
inv_result1 = pd.merge(inv_mean, inv_median, on='UF_COMPANY_ID', suffixes=['_INV_MEAN', '_INV_MEDIAN'])
inv_result = pd.merge(inv_result, inv_result1, on='UF_COMPANY_ID')
inv_result = pd.merge(inv_result, inv_month_together, on='UF_COMPANY_ID')
inv_result = pd.merge(inv_result, inv_by_year, on='UF_COMPANY_ID')
inv_result = inv_result.mask(np.isinf(inv_result))
ret_df = pd.concat([ret_df, inv_result])
return ret_df
@staticmethod
def _parse_quote(quote_list):
"""
        Creates a DataFrame of quotes (commercial proposals) from the list of dicts returned by the request
:param quote_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=[
'OPPORTUNITY_QUO_Q01', 'TIME_DIFF_CREATE_CLOSE_QUO_Q01', 'OPPORTUNITY_QUO_Q09',
'TIME_DIFF_CREATE_CLOSE_QUO_Q09', 'CLOSED', 'OPPORTUNITY_QUO_MEAN', 'TIME_DIFF_CREATE_CLOSE_QUO_MEAN',
'STATUS_ID_DEC', 'STATUS_ID_APP', 'STATUS_ID_DRA', 'STATUS_ID_UNA', 'STATUS_ID_REC',
'OPPORTUNITY_QUO_MEDIAN', 'TIME_DIFF_CREATE_CLOSE_QUO_MEDIAN'])
ret_df.index.name = 'COMPANY_ID'
if quote_list:
quote_df = pd.DataFrame(quote_list)
quote_df['OPPORTUNITY'] = pd.to_numeric(quote_df['OPPORTUNITY'])
quote_df['CLOSEDATE'] = | pd.to_datetime(quote_df['CLOSEDATE']) | pandas.to_datetime |
""" Base stock class
Based on https://github.com/ranaroussi/yfinance/blob/master/yfinance/base.py
Copyright 2020- <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import time
import datetime
import json
import requests
from . import helpers
from warnings import warn
VALID_PERIOD = ['1d', '5d', '1mo', '3mo', '6mo',
'1y', '2y', '5y', '10y', 'ytd', 'max']
VALID_INTERVAL = ['1m', '2m', '5m', '15m', '30m', '60m',
'90m', '1h', '1d', '5d', '1wk', '1mo', '3mo']
class Ticker():
"""Base class of stockmanager,
here it holds all basic infomation of a particular
ticker. The information is requested from Yahoo Finance.
Attributes
----------
symbol : str
ticker symbol, updating the symbol will update the fundamental,
e.g. Microsoft is MSFT.
_base_url : str
https://query1.finance.yahoo.com
_scrape_url : str
https://finance.yahoo.com/quote
_price_request_content : dict
Raw content of the web request.
_fundamentals : bool
Flag to check if get_fundamentals() is already successfully called.
major_holders : pandas.DataFrame
Major holders
institutional_holders : pandas.DataFrame
Top institutional holders
    mutual_fund_holders : pandas.DataFrame
Top mutual fund holder
company_information : dict
General information of the company,
e.g. sector, fullTimeEmployees, website, etc.
"""
# TODO add proxy
# TODO dont run summary to save time.
def __init__(self, symbol, proxy=None):
"""ticker is a string name of the stock"""
self._ticker_symbol = symbol.upper()
self._base_url = 'https://query1.finance.yahoo.com'
self._scrape_url = 'https://finance.yahoo.com/quote'
self._fundamentals = False
self._recommendations = None # TODO to be decided whether this necessary
self._institutional_holders = None
self._major_holders = None
self._mutual_fund_holders = None
self._sustainability = None
self._calendar = None
self._expirations = {}
self._info = None
self._proxy = proxy
self._name = None
self._current_price = None
self._currency = None
self._earnings = {
"yearly": helpers.empty_df(),
"quarterly": helpers.empty_df()}
self._financials = {
"yearly": helpers.empty_df(),
"quarterly": helpers.empty_df()}
self._balancesheet = {
"yearly": helpers.empty_df(),
"quarterly": helpers.empty_df()}
self._cashflow = {
"yearly": helpers.empty_df(),
"quarterly": helpers.empty_df()}
self.meta = []
self.timestamp = []
self.indicators = []
@property
def symbol(self):
"""ticker symbol."""
return self._ticker_symbol
@symbol.setter
def symbol(self, symbol):
self._ticker_symbol = symbol
@property
def institutional_holders(self):
return self._institutional_holders
@property
def major_holders(self):
return self._major_holders
@property
def mutual_fund_holders(self):
return self._mutual_fund_holders
@property
def sustainability(self):
return self._sustainability
@property
def company_information(self):
return self._info
@property
def name(self):
return self._name
@property
def current_price(self):
try:
url = '%s/%s' % (self._scrape_url, self._ticker_symbol)
data = helpers.get_json(url, self._proxy)
self._current_price = data['price']['regularMarketPrice']
return self._current_price
except:
return self._current_price
@property
def currency(self):
return self._currency
def get_price(self, period="1mo", interval="1d",
start=None, end=None, timezone=None, format='df'):
"""Return a DataFrame of the ticker based on certain period and interval
Examples
--------
Two ways of choosing the time range, 1) using period 2) start/end::
from stockmanager import Ticker
            msft = Ticker('MSFT')
df1 = msft.get_price(period='3mo', interval='1d')
# or to use start and end
df2 = msft.get_price(start='2020-01-01', end='2020-02-01')
Parameters
----------
period : str
Time period to retrive, it can only be one of the following:
1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
interval : str
Interval of the desired period, it can only be one of the following:
1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
start : None or str
Use either period or start/end. If start/end is used,
            use the YYYY-MM-DD format. The start date will be included if possible
end : None or str
            End date in YYYY-MM-DD format; the end date will be included if possible.
timezone : None or str
timezone for timestamp conversion.
format : str
Indicate the return variable type. By default it is a pandas DataFrame.
Other options are dict (returns the raw dictionary file). Or json
(returns in json format)
"""
# First get the time period right
if start or period is None or period.lower() == "max":
if start is None:
start = -2208988800
elif isinstance(start, str):
start = np.datetime64(start)
start = pd.to_datetime(start)
start = int(start.timestamp())
elif isinstance(start, datetime.datetime):
start = int(time.mktime(start.timetuple()))
else:
raise(TypeError("start must be None, str, or datetime.dateime"))
if end is None:
end = int(time.time())
elif isinstance(end, str):
end = np.datetime64(end)
end = pd.to_datetime(end) + datetime.timedelta(days=1) # This is to include end date
end = int(end.timestamp())
elif isinstance(end, datetime.datetime):
end = int(time.mktime(end.timetuple()))
else:
raise(TypeError("end must be None, str, or datetime.dateime"))
params = {"period1": start, "period2": end}
else:
period = period.lower()
if period not in VALID_PERIOD:
raise(AttributeError("valid period: 1d, 5d, 1mo, 3mo, 6mo, 1y, 2y, 5y, 10y, ytd, max. "))
params = {"range": period}
if interval not in VALID_INTERVAL:
raise(AttributeError("valid interval: 1m, 2m, 5m, 15m, 30m, 60m, 90m, 1h, 1d, 5d, 1wk, 1mo, 3mo"))
params["interval"] = interval.lower()
url = "{}/v8/finance/chart/{}".format(self._base_url, self._ticker_symbol)
self._price_request_content = requests.get(url=url, params=params)
# What if other language? Question, how to test it.
if "Will be right back" in self._price_request_content.text:
raise RuntimeError("*** YAHOO! FINANCE IS CURRENTLY DOWN! ***\n")
self._price_request_content = self._price_request_content.json()
self._price_request_content = self._price_request_content["chart"]["result"][0]
# Raw information from the request response.
self.meta = self._price_request_content['meta']
self.timestamp = self._price_request_content['timestamp']
self.indicators = self._price_request_content['indicators']
self.prices = self.indicators["quote"][0]
if format.lower() == "df":
try:
df = helpers.create_df(self._price_request_content, timezone)
df.dropna(inplace=True)
except Exception:
raise RuntimeError("Error parsing content.")
self.prices = df.copy()
elif format.lower() == "json":
# Dumping into a JSon formatted string.
self.prices = json.dumps(self.prices, sort_keys=True)
# self.prices = json.loads(s)
elif format.lower() == "dict":
pass
else:
warn("unrecognised format, return as dict")
return self.prices
def get_fundamentals(self, kind=None, proxy=None):
""""This part scrap information from the Yahoo Finance:
https://finance.yahoo.com/quote/YOUR_TICKER
It will try to get all fundamental information for more info than just prices.
Attributes
----------
* major_holders
* institutional_holders
* mutual_fund_holders
* info
* recommendations
"""
def cleanup(data):
df = | pd.DataFrame(data) | pandas.DataFrame |
import json
import math
import os
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
class Visualize(object):
def __init__(self):
self.method = 'Method'
self.direction = 'Direction'
self.accurate = 'Accurate'
self.filename_template = 'compare_of_method_{method}_dataset_{dataset}.csv'
self.methods_color_map = OrderedDict({
'TCA': 'xkcd:purple',
'JDA': 'xkcd:blue',
'BDA': 'xkcd:light blue',
'GFK': 'xkcd:green',
'SA': 'xkcd:pink',
'TJM': 'xkcd:brown',
'CORAL': 'xkcd:red',
'MEDA': 'xkcd:teal',
'EasyTL': 'xkcd:orange',
})
self.datasets = ['Amazon_Review', 'COIL_20', 'Cross_Dataset', 'Image_CLEF', 'Mnist-USPS',
'Office_31', 'Office_Caltech', 'Office_home', 'PIE', 'VisDA', 'VLSC', 'DomainNet']
self.dataset_name = ''
self.xticks_rotation = 0
self.legend_loc = 'upper left'
self.title = ''
self.stat_file = 'stat_file.json'
def read_csv(self, filename):
df = pd.read_csv(filename, engine='python')
# formalize
if any(df[self.accurate] > 1):
df[self.accurate] = df[self.accurate] / 100.0
return df
def _filter_by_method(self, df, method):
return df[df[self.method] == method]
def old(self, df):
df = self._filter_by_method(df, 'Old')
return df[[self.direction, self.accurate]]
def new(self, df):
df = self._filter_by_method(df, 'New')
return df[[self.direction, self.accurate]]
def show_csv(self, filename):
df = self.read_csv(filename)
# old
old = self.old(df)
new = self.new(df)
# rename direction
old = self._rename_direction(old)
new = self._rename_direction(new)
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
        ax.plot(old[self.direction], old[self.accurate], label='Old', linestyle="--")  # dashed line
        ax.plot(new[self.direction], new[self.accurate], label='New', linestyle="-")  # solid line
ax.legend(loc=self.legend_loc)
plt.xticks(rotation=self.xticks_rotation)
plt.tight_layout()
plt.show()
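    # Illustrative usage sketch (added for clarity; not part of the original code); the file name is
    # hypothetical but follows filename_template:
    #   viz = Visualize()
    #   viz.show_csv('compare_of_method_TCA_dataset_Office_31.csv')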
def cal_stat(self):
stat = {
'total': '',
'win_count': '',
'win_rate': '',
'min_increase': '',
'max_increase': '',
'avg_increase': ''
}
df = None
for idx, method in enumerate(self.methods_color_map.keys()):
filename = self.filename_template.format(method=method, dataset=self.dataset_name)
data = self.read_csv(filename)
old, new = self.old(data), self.new(data)
diff = | pd.merge(old, new, on=self.direction) | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 10 00:05:49 2021
@author: <NAME>
"""
import requests
import json
import time
from datetime import date, timedelta
import itertools
from ftfy import fix_encoding
import unidecode
import pandas as pd
class admetricks_api:
"""
A class to generate requests to the Admetricks REST API and get a report.
The operation of the methods is subject to the correct input of the variables and
to the admetricks user you are using having access to the admetricks REST API.
To learn more about the API documentation, go to https://dev.admetricks.com/#introduccion
...
Attributes
----------
username : str
        username used to log in to Admetricks
password : str
        password used to log in to Admetricks
Methods
-------
reports_generator(country = None, ad_type = None, device = None, since_date = None):
        Returns a DataFrame with a full report built from the API data
screenshots_data(country = None, site = None, since_date = None, until_date = None):
Returns a dataframe with the raw information of captured screenshots
"""
dictionary_countrys = {1:'chile',
2:'colombia',
3:'argentina',
4:'brasil',
5:'españa',
6:'peru',
7:'mexico',
8:'honduras',
9:'puerto rico',
10:'panama',
11:'uruguay',
12:'costa rica',
13:'guatemala',
14:'ecuador',
15:'venezuela',
16:'nicaragua',
17:'salvador',
18:'republica dominicana',
19:'paraguay'}
device = {1:'desktop', 2:'mobile'}
ad_type = {1:'display', 2:'video', 3:'text'}
current_date = date.today().isoformat()
days_before = (date.today()-timedelta(days=30)).isoformat()
combinations = list(itertools.product(list(device.values()),list(ad_type.values())))
def __init__(self, username = None, password = None):
"""
You provide the necessary data to authenticate within Admetricks.
Parameters
----------
username : str
            username used to log in to Admetricks
password : str
            password used to log in to Admetricks
"""
self.username = username
self.password = password
url = """https://clientela.admetricks.com/o/token/?username={username}&password={password}&client_id=IW8M80h7qgCaSz4hPm3gr3wJP89NiJTPyhkwPurT&client_secret=KnBW84uyHlxwlNrKOXyym6Ro1IT6IlYdhScdop63hHddCzJIxUwDG7VItNgEONb1U2ebEH6fBmkYgX9LrZD4uqFJlYscHYn9MLxOm2qVccNE2WGEuePpKA7t3jQ2CvMu&grant_type=password"""
response = requests.post(url.format(username = self.username, password = self.password))
res = json.loads(response.text)
self.token = res.get('access_token')
print('Your active token is {}'.format(self.token))
print(response)
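    # Illustrative usage sketch (added for clarity; not part of the original code); the credentials are
    # placeholders:
    #   api = admetricks_api(username="your_user", password="your_password")
    #   report = api.reports_generator(country="chile", ad_type="display", device="desktop")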
def reports_generator(self, country = None, ad_type = None, device = None, since_date = None):
"""
A function that returns a dataframe with a full report with the information of the API.
Parameters
----------
country : str
name of your country.
ad_type : str
Type of ad you want to study. The options are: [all, display, video, text]
device : str
Type of device you want to study. The options are: [all, desktop, mobile]
since_date : str
From what date do you want to export data.
Returns
-------
DataFrame
"""
if isinstance(country, type(None)):
            raise ValueError('Define your country')
if isinstance(ad_type, type(None)):
ad_type = 'all'
if isinstance(device, type(None)):
device = 'all'
if isinstance(since_date, type(None)):
since_date = str(self.days_before)
country = country.lower()
country = unidecode.unidecode(country)
my_dataframe = pd.DataFrame()
header = {
'Authorization': 'Bearer '+ self.token,
'content-type': 'application/json'}
country_value = list(self.dictionary_countrys.keys())[list(self.dictionary_countrys.values()).index(country)]
if ad_type == 'all':
if device == 'all':
for devices, ad_types in self.combinations:
device_value = list(self.device.keys())[list(self.device.values()).index(devices)]
ad_type_value = list(self.ad_type.keys())[list(self.ad_type.values()).index(ad_types)]
params = (('day', since_date), ('country', str(country_value)), ('device', str(device_value)), ('ad_type', str(ad_type_value)),)
requested = requests.post(url = 'https://clientela.admetricks.com/market-report/data/v3/', headers = header, params = params)
data = json.loads(requested.text)
my_dataframe = pd.concat([my_dataframe, pd.DataFrame.from_dict(data['data'])])
time.sleep(0.5)
else:
device_value = list(self.device.keys())[list(self.device.values()).index(device)]
for value, names in self.ad_type.items():
params = (('day', since_date), ('country', str(country_value)), ('device', str(device_value)), ('ad_type', str(value)),)
requested = requests.post(url = 'https://clientela.admetricks.com/market-report/data/v3/', headers = header, params = params)
data = json.loads(requested.text)
my_dataframe = pd.concat([my_dataframe, pd.DataFrame.from_dict(data['data'])])
time.sleep(0.5)
else:
if device == 'all':
ad_type_value = list(self.ad_type.keys())[list(self.ad_type.values()).index(ad_type)]
for value, names in self.device.items():
params = (('day', since_date), ('country', str(country_value)), ('device', str(value)), ('ad_type', str(ad_type_value)),)
requested = requests.post(url = 'https://clientela.admetricks.com/market-report/data/v3/', headers = header, params = params)
data = json.loads(requested.text)
my_dataframe = pd.concat([my_dataframe, | pd.DataFrame.from_dict(data['data']) | pandas.DataFrame.from_dict |
import argparse
import json
import logging
import time
from pathlib import Path
from typing import List, Dict, Optional
import numpy as np
import pandas as pd
from timeeval import Algorithm, Status, Datasets, Metric
from timeeval.adapters.docker import SCORES_FILE_NAME as DOCKER_SCORES_FILE_NAME
from timeeval.constants import RESULTS_CSV, HYPER_PARAMETERS, METRICS_CSV, ANOMALY_SCORES_TS
from timeeval.data_types import ExecutionType
from timeeval.experiments import Experiment as TimeEvalExperiment
from timeeval.utils.datasets import load_labels_only
# required to build a lookup-table for algorithm implementations
import timeeval_experiments.algorithms as algorithms
# noinspection PyUnresolvedReferences
from timeeval_experiments.algorithms import *
from timeeval_experiments.baselines import Baselines
INITIAL_WAITING_SECONDS = 5
def path_is_empty(path: Path) -> bool:
return not any(path.iterdir())
class Evaluator:
def __init__(self, results_path: Path, data_path: Path, metrics: List[Metric]):
self._logger = logging.getLogger(self.__class__.__name__)
self.results_path = results_path
self.data_path = data_path
self.metrics = metrics
self.algos = self._build_algorithm_dict()
self.dmgr = Datasets(data_path, create_if_missing=False)
self.df: pd.DataFrame = | pd.read_csv(results_path / RESULTS_CSV) | pandas.read_csv |
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, auc
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from imblearn.under_sampling import TomekLinks
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
def get_attribute(excel_filepath='DIAS Attributes - Values 2017'):
'''Processes attribute description data
Args:
excel - attribute information
Returns:
dict - dictionary contains attribute names and Values
'''
att_values = pd.read_excel(excel_filepath, header=1)
att_values = att_values.fillna('')
att_values.drop('Unnamed: 0', axis=1, inplace=True)
# find unique values of each attributes
idx = []
for i in range(att_values.shape[0]):
if len(att_values.Attribute[i]) > 0:
idx.append(i)
attr_dict = {}
for i in range(len(idx)-1):
key_name = att_values.Attribute[idx[i]]
attr_dict[key_name] = att_values.Value[idx[i]:idx[i+1]].tolist()
last_key = att_values.Attribute[idx[-1]]
    attr_dict[last_key] = att_values.Value[idx[-1]:].tolist()
return attr_dict
def check_value(x):
'''check the values for missing value'''
if type(x) == float:
return x
elif x == 'X' or (x == 'XX'):
return np.nan
else:
return float(x)
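# Illustrative examples (added for clarity; not part of the original script):
#   check_value('X')    -> nan    ('X'/'XX' are unknown markers)
#   check_value('XX')   -> nan
#   check_value('3')    -> 3.0
#   check_value(np.nan) -> nan    (floats are passed through unchanged)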
def clean_data(df, attr_dict):
'''Processes data
- Converts missing values to np.nan using loaded features table
- Drops unwanted columns and rows
- Convert mixed datatype to float
- Perfroms feature enginerring
Args:
df (pd.Dataframe): data to be cleaned
feat_info (to_dict): feature information
Returns:
cleaned_df (pd.Dataframe): cleaned rows
'''
clean_df = df.copy()
cols = clean_df.columns[18:20]
for col in cols:
clean_df[col] = clean_df[col].apply(lambda x: check_value(x))
col_nulls = clean_df.isnull().sum()/clean_df.shape[0]
row_nulls = clean_df.isnull().sum(axis=1)/clean_df.shape[1]
    # remove columns with more than 22% nulls in the azdias dataframe
cols = col_nulls[col_nulls<=0.22].index.tolist()
clean_df = clean_df.loc[:, cols]
# remove columns with kba
kba_cols = clean_df.columns[clean_df.columns.str.startswith('KBA')]
clean_df.drop(list(kba_cols), axis=1, inplace=True)
# get the dummy for region
dummy = pd.get_dummies(clean_df['OST_WEST_KZ'])
clean_df.drop('OST_WEST_KZ', axis=1, inplace=True)
clean_df = pd.concat([clean_df, dummy], axis=1)
# re-engineer PRAEGENDE_JUGENDJAHRE
to_replace = {1:4, 2:4, 3:5, 4:5, 5:6, 6:6, 7:6, 8:7, 9:7, 10:8, 11:8, 12:8, 13:8, 14:9, 15:9}
clean_df['decade'] = clean_df['PRAEGENDE_JUGENDJAHRE'].replace(to_replace)
clean_df.drop(['CAMEO_DEU_2015', 'PRAEGENDE_JUGENDJAHRE', 'D19_LETZTER_KAUF_BRANCHE', 'EINGEFUEGT_AM'] , axis=1, inplace=True)
return clean_df
def fill_null(clean_df):
'''This function takes the cleaned df, fill numerical columns with mean, and
categorical columns with median.
Args: clean df
Return: df without missing values
'''
# select columns with numerical values
num_col = []
for key, item in attr_dict.items():
if item[0] == '…':
num_col.append(key)
# fill mean for numerical columns
for col in num_col:
try:
az_mean = clean_df[col].mean()
clean_df[col] = clean_df[col].fillna(az_mean)
except KeyError:
continue
# fill median for categorical columns
# fill all other columns with mode
for col in clean_df.columns:
try:
az_median = clean_df[col].median()
clean_df[col] = clean_df[col].fillna(az_median)
except KeyError:
continue
return clean_df
def build_model(model):
'''
Creates pipeline with two steps: column transformer (ct) introduced in preprocessing step and classifier (model).
Input:
scaler: scale the features
model: object type that implements the “fit” and “predict” methods
Output:
pipeline: object type with "fit" and "predict" methods
'''
pipeline = Pipeline([
('scaler', StandardScaler()),
('clf', model)
])
parameters = {
'clf__n_estimators': [50, 100, 200],
'clf__learning_rate': [0.001, 0.01, 0.1],
'clf__boosting_type': ['gbdt','dart'],
'clf__num_leaves': [31, 62]
#'clf__base_estimator__min_samples_split': [2, 5, 10],
#'clf__base_estimator__max_depth': [1, 3, 5]
}
cv = GridSearchCV(pipeline, param_grid=parameters)
return cv
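# Illustrative usage sketch (added for clarity; not part of the original script): the parameter grid
# above uses LightGBM-style names (boosting_type, num_leaves), so the model passed in is assumed to
# be an LGBMClassifier (lightgbm is not imported in this file):
#   import lightgbm as lgb
#   cv = build_model(lgb.LGBMClassifier())
#   cv.fit(X_train, y_train)
#   y_pred = cv.predict(X_test)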
def clean_test(df_cus):
'''Processes data
- Converts missing values to np.nan using loaded features table
- Drops unwanted columns and rows
- Convert mixed datatype to float
- Perfroms feature enginerring
Args:
df (pd.Dataframe): data to be cleaned
Returns:
cleaned_df (pd.Dataframe): cleaned rows
'''
cols = df_cus.columns[18:20]
for col in cols:
        df_cus[col] = df_cus[col].apply(lambda x: check_value(x))
# get dummy regions
dummy = | pd.get_dummies(df_cus['OST_WEST_KZ']) | pandas.get_dummies |
"""
Module for legacy LEAP dataset.
"""
import json
import os
import numpy as np
import pandas as pd
from typing import List
from sleap.util import json_loads
from sleap.io.video import Video
from sleap.instance import (
LabeledFrame,
PredictedPoint,
PredictedInstance,
Track,
Point,
Instance,
)
from sleap.skeleton import Skeleton
def load_predicted_labels_json_old(
data_path: str,
parsed_json: dict = None,
adjust_matlab_indexing: bool = True,
fix_rel_paths: bool = True,
) -> List[LabeledFrame]:
"""
Load predicted instances from Talmo's old JSON format.
Args:
data_path: The path to the JSON file.
parsed_json: The parsed json if already loaded, so we can save
some time if already parsed.
adjust_matlab_indexing: Whether to adjust indexing from MATLAB.
fix_rel_paths: Whether to fix paths to videos to absolute paths.
Returns:
List of :class:`LabeledFrame` objects.
"""
if parsed_json is None:
data = json.loads(open(data_path).read())
else:
data = parsed_json
videos = | pd.DataFrame(data["videos"]) | pandas.DataFrame |
"""High-level functions to help perform complex tasks
"""
from __future__ import print_function, division
import os
import multiprocessing as mp
import warnings
from datetime import datetime
import platform
import struct
import shutil
import copy
import numpy as np
import pandas as pd
import time
pd.options.display.max_colwidth = 100
from ..pyemu_warnings import PyemuWarning
try:
import flopy
except:
pass
import pyemu
from pyemu.utils.os_utils import run, start_workers
def geostatistical_draws(pst, struct_dict,num_reals=100,sigma_range=4,verbose=True):
"""construct a parameter ensemble from a prior covariance matrix
implied by geostatistical structure(s) and parameter bounds.
Args:
pst (`pyemu.Pst`): a control file (or the name of control file). The
parameter bounds in `pst` are used to define the variance of each
parameter group.
struct_dict (`dict`): a dict of GeoStruct (or structure file), and list of
pilot point template files pairs. If the values in the dict are
`pd.DataFrames`, then they must have an 'x','y', and 'parnme' column.
If the filename ends in '.csv', then a pd.DataFrame is loaded,
otherwise a pilot points file is loaded.
num_reals (`int`, optional): number of realizations to draw. Default is 100
sigma_range (`float`): a float representing the number of standard deviations
implied by parameter bounds. Default is 4.0, which implies 95% confidence parameter bounds.
verbose (`bool`, optional): flag to control output to stdout. Default is True.
flag for stdout.
Returns
`pyemu.ParameterEnsemble`: the realized parameter ensemble.
Note:
parameters are realized by parameter group. The variance of each
parameter group is used to scale the resulting geostatistical
covariance matrix Therefore, the sill of the geostatistical structures
in `struct_dict` should be 1.0
Example::
pst = pyemu.Pst("my.pst")
sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}
pe = pyemu.helpers.geostatistical_draws(pst,struct_dict=sd}
pe.to_csv("my_pe.csv")
"""
if isinstance(pst,str):
pst = pyemu.Pst(pst)
assert isinstance(pst,pyemu.Pst),"pst arg must be a Pst instance, not {0}".\
format(type(pst))
if verbose: print("building diagonal cov")
full_cov = pyemu.Cov.from_parameter_data(pst, sigma_range=sigma_range)
full_cov_dict = {n: float(v) for n, v in zip(full_cov.col_names, full_cov.x)}
# par_org = pst.parameter_data.copy # not sure about the need or function of this line? (BH)
par = pst.parameter_data
par_ens = []
pars_in_cov = set()
keys = list(struct_dict.keys())
keys.sort()
for gs in keys:
items = struct_dict[gs]
if verbose: print("processing ",gs)
if isinstance(gs,str):
gss = pyemu.geostats.read_struct_file(gs)
if isinstance(gss,list):
warnings.warn("using first geostat structure in file {0}".\
format(gs),PyemuWarning)
gs = gss[0]
else:
gs = gss
if gs.sill != 1.0:
warnings.warn("GeoStruct {0} sill != 1.0 - this is bad!".format(gs.name))
if not isinstance(items,list):
items = [items]
#items.sort()
for item in items:
if isinstance(item,str):
assert os.path.exists(item),"file {0} not found".\
format(item)
if item.lower().endswith(".tpl"):
df = pyemu.pp_utils.pp_tpl_to_dataframe(item)
                elif item.lower().endswith(".csv"):
df = pd.read_csv(item)
else:
df = item
if "pargp" in df.columns:
if verbose: print("working on pargroups {0}".format(df.pargp.unique().tolist()))
for req in ['x','y','parnme']:
if req not in df.columns:
raise Exception("{0} is not in the columns".format(req))
missing = df.loc[df.parnme.apply(
lambda x : x not in par.parnme),"parnme"]
if len(missing) > 0:
warnings.warn("the following parameters are not " + \
"in the control file: {0}".\
format(','.join(missing)),PyemuWarning)
df = df.loc[df.parnme.apply(lambda x: x not in missing)]
if "zone" not in df.columns:
df.loc[:,"zone"] = 1
zones = df.zone.unique()
aset = set(pst.adj_par_names)
for zone in zones:
df_zone = df.loc[df.zone==zone,:].copy()
df_zone = df_zone.loc[df_zone.parnme.apply(lambda x: x in aset),:]
if df_zone.shape[0] == 0:
warnings.warn("all parameters in zone {0} tied and/or fixed, skipping...".format(zone),PyemuWarning)
continue
#df_zone.sort_values(by="parnme",inplace=True)
df_zone.sort_index(inplace=True)
if verbose: print("build cov matrix")
cov = gs.covariance_matrix(df_zone.x,df_zone.y,df_zone.parnme)
if verbose: print("done")
if verbose: print("getting diag var cov",df_zone.shape[0])
#tpl_var = np.diag(full_cov.get(list(df_zone.parnme)).x).max()
tpl_var = max([full_cov_dict[pn] for pn in df_zone.parnme])
if verbose: print("scaling full cov by diag var cov")
#cov.x *= tpl_var
for i in range(cov.shape[0]):
cov.x[i,:] *= tpl_var
# no fixed values here
pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst=pst,cov=cov,num_reals=num_reals,
by_groups=False,fill=False)
#df = pe.iloc[:,:]
par_ens.append(pe._df)
pars_in_cov.update(set(pe.columns))
if verbose: print("adding remaining parameters to diagonal")
fset = set(full_cov.row_names)
diff = list(fset.difference(pars_in_cov))
if (len(diff) > 0):
name_dict = {name:i for i,name in enumerate(full_cov.row_names)}
vec = np.atleast_2d(np.array([full_cov.x[name_dict[d]] for d in diff]))
cov = pyemu.Cov(x=vec,names=diff,isdiagonal=True)
#cov = full_cov.get(diff,diff)
# here we fill in the fixed values
pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst,cov,num_reals=num_reals,
fill=False)
par_ens.append(pe._df)
par_ens = | pd.concat(par_ens,axis=1) | pandas.concat |
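# --- Added illustration (not part of the original pyemu module): a hedged usage sketch for
# geostatistical_draws. "my.pst", "struct.dat", the pilot-point coordinates and parameter names
# are assumptions; parnme values must match adjustable parameters in the control file.
def _example_geostatistical_draws():
    pst = pyemu.Pst("my.pst")
    # pilot-point info may be passed as a DataFrame with 'x', 'y' and 'parnme' columns
    pp_df = pd.DataFrame({"x": [0.0, 500.0, 1000.0],
                          "y": [0.0, 0.0, 0.0],
                          "parnme": ["hk_pp0", "hk_pp1", "hk_pp2"]})
    pe = geostatistical_draws(pst, struct_dict={"struct.dat": [pp_df]}, num_reals=50)
    pe.to_csv("prior_pe.csv")
    return pe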
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Metrics that allow to retrieve curves of partial results.
Typically used to retrieve partial learning curves of ML training jobs.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import pandas as pd
from ax.core.base_trial import BaseTrial
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.map_data import MapData, MapKeyInfo
from ax.core.map_metric import MapMetric
from ax.core.metric import Metric
from ax.core.trial import Trial
from ax.early_stopping.utils import align_partial_results
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast
logger = get_logger(__name__)
class AbstractCurveMetric(MapMetric, ABC):
"""Metric representing (partial) learning curves of ML model training jobs."""
MAP_KEY = MapKeyInfo(key="training_rows", default_value=0.0)
def __init__(
self,
name: str,
curve_name: str,
lower_is_better: bool = True,
) -> None:
"""Inits Metric.
Args:
name: The name of the metric.
curve_name: The name of the learning curve in the training output
(there may be multiple outputs e.g. for MTML models).
lower_is_better: If True, lower curve values are considered better.
"""
super().__init__(name=name, lower_is_better=lower_is_better)
self.curve_name = curve_name
@classmethod
def is_available_while_running(cls) -> bool:
return True
def fetch_trial_data(self, trial: BaseTrial, **kwargs: Any) -> Data:
"""Fetch data for one trial."""
return self.fetch_trial_data_multi(trial=trial, metrics=[self], **kwargs)
@classmethod
def fetch_trial_data_multi(
cls, trial: BaseTrial, metrics: Iterable[Metric], **kwargs: Any
) -> Data:
"""Fetch multiple metrics data for one trial."""
return cls.fetch_experiment_data_multi(
experiment=trial.experiment, metrics=metrics, trials=[trial], **kwargs
)
@classmethod
def fetch_experiment_data_multi(
cls,
experiment: Experiment,
metrics: Iterable[Metric],
trials: Optional[Iterable[BaseTrial]] = None,
**kwargs: Any,
) -> Data:
"""Fetch multiple metrics data for an experiment."""
if trials is None:
trials = list(experiment.trials.values())
trials = [trial for trial in trials if trial.status.expecting_data]
if any(not isinstance(trial, Trial) for trial in trials):
raise RuntimeError(
f"Only (non-batch) Trials are supported by {cls.__name__}"
)
trial_idx_to_id = cls.get_ids_from_trials(trials=trials)
if len(trial_idx_to_id) == 0:
logger.debug("Could not get ids from trials. Returning empty data.")
return MapData(map_key_infos=[cls.MAP_KEY])
all_curve_series = cls.get_curves_from_ids(ids=trial_idx_to_id.values())
if all(id_ not in all_curve_series for id_ in trial_idx_to_id.values()):
logger.debug("Could not get curves from ids. Returning empty data.")
return MapData(map_key_infos=[cls.MAP_KEY])
df = cls.get_df_from_curve_series(
experiment=experiment,
all_curve_series=all_curve_series,
metrics=metrics,
trial_idx_to_id=trial_idx_to_id,
)
return MapData(df=df, map_key_infos=[cls.MAP_KEY])
@classmethod
def get_df_from_curve_series(
cls,
experiment: Experiment,
all_curve_series: Dict[Union[int, str], Dict[str, pd.Series]],
metrics: Iterable[Metric],
trial_idx_to_id: Dict[int, Union[int, str]],
) -> Optional[pd.DataFrame]:
"""Convert a `all_curve_series` dict (from `get_curves_from_ids`) into
a dataframe. For each metric, we get one curve (of name `curve_name`).
Args:
experiment: The experiment.
all_curve_series: A dict containing curve data, as output from
`get_curves_from_ids`.
metrics: The metrics from which data is being fetched.
trial_idx_to_id: A dict mapping trial index to ids.
Returns:
A dataframe containing curve data or None if no curve data could be found.
"""
dfs = []
for trial_idx, id_ in trial_idx_to_id.items():
if id_ not in all_curve_series:
logger.debug(f"Could not get curve data for id {id_}. Ignoring.")
continue
curve_series = all_curve_series[id_]
for m in metrics:
if m.curve_name in curve_series: # pyre-ignore [16]
dfi = _get_single_curve(
curve_series=curve_series,
curve_name=m.curve_name,
metric_name=m.name,
map_key=cls.MAP_KEY.key,
trial=experiment.trials[trial_idx],
)
dfs.append(dfi)
else:
logger.debug(
f"{m.curve_name} not yet present in curves from {id_}. "
"Returning without this metric."
)
if len(dfs) == 0:
return None
return pd.concat(dfs, axis=0, ignore_index=True)
@classmethod
@abstractmethod
def get_ids_from_trials(
cls, trials: Iterable[BaseTrial]
) -> Dict[int, Union[int, str]]:
"""Get backend run ids associated with trials.
Args:
trials: The trials for which to retrieve the associated
ids that can be used to to identify the corresponding
runs on the backend.
Returns:
A dictionary mapping the trial indices to the identifiers
(ints or strings) corresponding to the backend runs associated
with the trials. Trials whose corresponding ids could not be
found should be omitted.
"""
... # pragma: nocover
@classmethod
@abstractmethod
def get_curves_from_ids(
cls, ids: Iterable[Union[int, str]]
) -> Dict[Union[int, str], Dict[str, pd.Series]]:
"""Get partial result curves from backend ids.
Args:
ids: The ids of the backend runs for which to fetch the
partial result curves.
Returns:
A dictionary mapping the backend id to the partial result
curves, each of which is represented as a mapping from
the metric name to a pandas Series indexed by the progression
(which will be mapped to the `MAP_KEY` of the metric class).
E.g. if `curve_name=loss` and `MAP_KEY=training_rows`, then a
Series should look like:
training_rows (index) | loss
-----------------------|------
100 | 0.5
200 | 0.2
"""
... # pragma: nocover
class AbstractScalarizedCurveMetric(AbstractCurveMetric):
"""A linear scalarization of (partial) learning curves of ML model training jobs:
scalarized_curve = offset + sum_i(coefficients[i] * curve[i]).
It is assumed that the output of `get_curves_from_ids` contains all of the curves
necessary for performing the scalarization.
"""
def __init__(
self,
name: str,
coefficients: Dict[str, float],
offset: float = 0.0,
lower_is_better: bool = True,
) -> None:
"""Construct a AbstractScalarizedCurveMetric.
Args:
name: Name of metric.
coefficients: A mapping from learning curve names to their
scalarization coefficients.
offset: The offset of the affine scalarization.
lower_is_better: If True, lower values (of the scalarized metric) are
considered better.
"""
MapMetric.__init__(self, name=name, lower_is_better=lower_is_better)
self.coefficients = coefficients
self.offset = offset
@classmethod
def get_df_from_curve_series(
cls,
experiment: Experiment,
all_curve_series: Dict[Union[int, str], Dict[str, pd.Series]],
metrics: Iterable[Metric],
trial_idx_to_id: Dict[int, Union[int, str]],
) -> Optional[pd.DataFrame]:
"""Convert a `all_curve_series` dict (from `get_curves_from_ids`) into
a dataframe. For each metric, we first get all curves represented in
`coefficients` and then perform scalarization.
Args:
experiment: The experiment.
all_curve_series: A dict containing curve data, as output from
`get_curves_from_ids`.
metrics: The metrics from which data is being fetched.
trial_idx_to_id: A dict mapping trial index to ids.
Returns:
A dataframe containing curve data or None if no curve data could be found.
"""
dfs = []
complete_metrics_by_trial = {
trial_idx: [] for trial_idx in trial_idx_to_id.keys()
}
for trial_idx, id_ in trial_idx_to_id.items():
if id_ not in all_curve_series:
logger.debug(f"Could not get curve data for id {id_}. Ignoring.")
continue
curve_series = all_curve_series[id_]
for m in metrics:
curve_dfs = []
for curve_name in m.coefficients.keys(): # pyre-ignore[16]
if curve_name in curve_series:
curve_df = _get_single_curve(
curve_series=curve_series,
curve_name=curve_name,
map_key=cls.MAP_KEY.key,
trial=experiment.trials[trial_idx],
)
curve_dfs.append(curve_df)
else:
logger.debug(
f"{curve_name} not present in curves from {id_}, so the "
f"scalarization for {m.name} cannot be computed. Returning "
"without this metric."
)
break
if len(curve_dfs) == len(m.coefficients):
# only keep if all curves needed by the metric are available
dfs.extend(curve_dfs)
# mark metrics who have all underlying curves
complete_metrics_by_trial[trial_idx].append(m)
if len(dfs) == 0:
return None
all_data_df = pd.concat(dfs, axis=0, ignore_index=True)
sub_dfs = []
# Do not create a common index across trials, only across the curves
# involved in the scalarized metric.
for trial_idx, dfi in all_data_df.groupby("trial_index"):
# the `do_forward_fill = True` pads with the latest
# observation to handle situations where learning curves
# report different amounts of data.
trial_curves = dfi["metric_name"].unique().tolist()
dfs_mean, dfs_sem = align_partial_results(
dfi,
progr_key=cls.MAP_KEY.key,
metrics=trial_curves,
do_forward_fill=True,
)
for metric in complete_metrics_by_trial[trial_idx]:
sub_df = _get_scalarized_curve_metric_sub_df(
dfs_mean=dfs_mean,
dfs_sem=dfs_sem,
metric=metric,
trial=checked_cast(Trial, experiment.trials[trial_idx]),
)
sub_dfs.append(sub_df)
return | pd.concat(sub_dfs, axis=0, ignore_index=True) | pandas.concat |
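# --- Added illustration (not part of the Ax source above): a hedged sketch of a minimal
# concrete AbstractCurveMetric. The in-memory _RUN_CURVES store and the "job_id" key in
# trial.run_metadata are assumptions made only for this example.
_RUN_CURVES: Dict[str, Dict[str, pd.Series]] = {
    "job-0": {"loss": pd.Series([0.5, 0.2], index=[100, 200])},
}
class InMemoryCurveMetric(AbstractCurveMetric):
    @classmethod
    def get_ids_from_trials(cls, trials):
        # map trial index -> backend run id, omitting trials without a known id
        ids = {}
        for trial in trials:
            job_id = trial.run_metadata.get("job_id")
            if job_id is not None:
                ids[trial.index] = job_id
        return ids
    @classmethod
    def get_curves_from_ids(cls, ids):
        # return curves only for ids the (hypothetical) backend knows about
        return {id_: _RUN_CURVES[id_] for id_ in ids if id_ in _RUN_CURVES}
# usage sketch: InMemoryCurveMetric(name="loss", curve_name="loss")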
from typing import Optional
import math
import os
import enum
import json
from os.path import join as _join
from copy import deepcopy
import numpy as np
import pandas as pd
from wepppy.all_your_base.dateutils import YearlessDate
from wepppy.all_your_base.stats import weibull_series, probability_of_occurrence
from wepppy.wepp.out import HillWat
from .wind_transport_thresholds import *
_thisdir = os.path.dirname(__file__)
_data_dir = _join(_thisdir, 'data')
class AshType(enum.IntEnum):
BLACK = 0
WHITE = 1
class AshNoDbLockedException(Exception):
pass
WHITE_ASH_BD = 0.31
BLACK_ASH_BD = 0.22
class AshModel(object):
"""
Base class for the hillslope ash models. This class is inherited by
the WhiteAshModel and BlackAshModel classes
"""
def __init__(self,
ash_type: AshType,
proportion,
decomposition_rate,
bulk_density,
density_at_fc,
fraction_water_retention_capacity_at_sat,
runoff_threshold,
water_transport_rate,
water_transport_rate_k,
wind_threshold,
porosity):
self.ash_type = ash_type
self.proportion = proportion
self.ini_ash_depth_mm = None
self.ini_ash_load_tonneha = None
self.decomposition_rate = decomposition_rate
self.bulk_density = bulk_density
self.density_at_fc = density_at_fc
self.fraction_water_retention_capacity_at_sat = fraction_water_retention_capacity_at_sat
self.runoff_threshold = runoff_threshold
self.water_transport_rate = water_transport_rate
self.water_transport_rate_k = water_transport_rate_k
self.wind_threshold = wind_threshold
self.porosity = porosity
@property
def ini_material_available_mm(self):
print('proportion', self.proportion, type(self.proportion))
print('ini_ash_depth_mm', self.ini_ash_depth_mm, type(self.ini_ash_depth_mm))
return self.proportion * self.ini_ash_depth_mm
@property
def ini_material_available_tonneperha(self):
if self.ini_ash_load_tonneha is not None:
return self.ini_ash_load_tonneha
else:
return 10.0 * self.ini_material_available_mm * self.bulk_density
@property
def water_retention_capacity_at_sat(self):
return self.fraction_water_retention_capacity_at_sat * self.ini_ash_depth_mm
def lookup_wind_threshold_proportion(self, w):
if w == 0.0:
return 0.0
if self.ash_type == AshType.BLACK:
return lookup_wind_threshold_black_ash_proportion(w)
elif self.ash_type == AshType.WHITE:
return lookup_wind_threshold_white_ash_proportion(w)
def run_model(self, fire_date: YearlessDate, element_d, cli_df: pd.DataFrame, hill_wat: HillWat, out_dir, prefix,
recurrence=[100, 50, 25, 20, 10, 5, 2],
area_ha: Optional[float]=None,
ini_ash_depth: Optional[float]=None,
ini_ash_load: Optional[float]=None, run_wind_transport=True, model='neris'):
"""
Runs the ash model for a hillslope
:param fire_date:
month, day of fire as a YearlessDate instance
:param element_d:
dictionary runoff events from the element WEPP output. The keys are (year, mo, da) and the values contain
the row data as dictionaries with header keys
:param cli_df:
the climate file produced by CLIGEN as a pandas.Dataframe
:param out_dir:
the directory save the model output
:param prefix:
prefix for the model output file
:param recurrence:
list of recurrence intervals
:return:
returns the output file name, return period results dictionary
"""
if model == 'anu':
            return self._anu_run_model(fire_date=fire_date, element_d=element_d, cli_df=cli_df, hill_wat=hill_wat,
out_dir=out_dir, prefix=prefix, recurrence=recurrence,
area_ha=area_ha, ini_ash_depth=ini_ash_depth, ini_ash_load=ini_ash_load,
run_wind_transport=run_wind_transport)
else:
            return self._neris_run_model(fire_date=fire_date, element_d=element_d, cli_df=cli_df, hill_wat=hill_wat,
out_dir=out_dir, prefix=prefix, recurrence=recurrence,
area_ha=area_ha, ini_ash_depth=ini_ash_depth, ini_ash_load=ini_ash_load,
run_wind_transport=run_wind_transport)
def _neris_run_model(self, fire_date: YearlessDate, element_d, cli_df: pd.DataFrame, hill_wat: HillWat, out_dir, prefix,
recurrence=[100, 50, 25, 20, 10, 5, 2],
area_ha: Optional[float]=None,
ini_ash_depth: Optional[float]=None,
ini_ash_load: Optional[float]=None, run_wind_transport=True):
self.ini_ash_depth_mm = ini_ash_depth
self.ini_ash_load_tonneha = ini_ash_load
# copy the DataFrame
df = deepcopy(cli_df)
hill_wat_d = hill_wat.as_dict()
# number of days in the file
s_len = len(df.da)
#
# Initialize np.arrays to store model values
#
# current fire year starting at 1
fire_years = np.zeros((s_len,), dtype=np.int32)
        # days from fire for each fire year
days_from_fire = np.zeros((s_len,), dtype=np.int32)
# fraction of ash lost from decay for a day
daily_relative_ash_decay = np.zeros((s_len,))
cum_relative_ash_decay = np.zeros((s_len,))
# daily total available ash in tonne/ha
available_ash = np.zeros((s_len,))
# wind transport variables
w_vl_ifgt = np.zeros((s_len,))
proportion_ash_transport = np.zeros((s_len,))
cum_proportion_ash_transport = np.zeros((s_len,))
wind_transport = np.zeros((s_len,))
cum_wind_transport = np.zeros((s_len,))
# peak runoff from the element (PeakRunoffRAW) WEPP output
peak_ro = np.zeros((s_len,))
# effective duration from the element (PeakRunoffRAW) WEPP output
eff_dur = np.zeros((s_len,))
# effective duration from the element (Precip) WEPP output
precip = np.zeros((s_len,))
# water transport modeling variables
water_excess = np.zeros((s_len,))
real_runoff = np.zeros((s_len,))
effective_runoff = np.zeros((s_len,))
cum_runoff = np.zeros((s_len,))
soil_evap = np.zeros((s_len,))
ash_wat_cap = np.zeros((s_len,))
water_transport = np.zeros((s_len,))
cum_water_transport = np.zeros((s_len,))
#
# Loop through each day in the climate file
#
breaks = [] # list of indices of new fire years
fire_year = 0 # current fire year
w_vl_if = 0.0 # maximum wind speed event for current fire year
dff = -1 # days from fire for current year
for i, _row in df.iterrows():
#
# is today the fire day?
#
if _row.mo == fire_date.month and _row.da == fire_date.day:
breaks.append(i) # record the index for the new year
fire_year += 1 # increment the fire year
w_vl_if = 0.0 # reset the wind threshold for the fire year
dff = 0 # reset the days from fire
# store the fire year and days from fire
fire_years[i] = fire_year
days_from_fire[i] = dff
# if we are in the first year of the climate file and haven't encountered the fire date
# we can just continue to the next day
if dff == -1:
continue
#
# on the first day of the fire reset the available ash
#
if dff == 0:
available_ash[i] = self.ini_material_available_tonneperha
#
# model ash decay
#
            # this was determined as the derivative of 1 * exp(-decomposition_rate * time_from_fire(days))
else:
daily_relative_ash_decay[i] = self.decomposition_rate * math.exp(-self.decomposition_rate * dff)
if i > 0:
cum_relative_ash_decay[i] = cum_relative_ash_decay[i-1] + daily_relative_ash_decay[i]
available_ash[i] = available_ash[i-1] * (1.0 - daily_relative_ash_decay[i])
#
# model runoff
#
# the element file contains event data we need to look up if the current day has data
# from the element_d dictionary
# unpack the key
yr_mo_da = _row.year, _row.mo, _row.da
if yr_mo_da in element_d:
peak_ro[i] = element_d[yr_mo_da]['PeakRO']
eff_dur[i] = element_d[yr_mo_da]['EffDur']
precip[i] = element_d[yr_mo_da]['Precip']
else:
peak_ro[i] = 0.0
eff_dur[i] = 0.0
precip[i] = 0.0
if yr_mo_da in hill_wat_d:
soil_evap[i] = hill_wat_d[yr_mo_da]['Es (mm)']
else:
soil_evap[i] = 0.0
assert not math.isnan(peak_ro[i])
assert not math.isnan(eff_dur[i])
# calculate excess water
water_excess[i] = peak_ro[i] * eff_dur[i]
assert not math.isnan(water_excess[i])
# calculate real runoff accounting for available ash
real_runoff[i] = water_excess[i] - (available_ash[i] / (10 * self.bulk_density)) * self.porosity
if real_runoff[i] < 0:
real_runoff[i] = 0.0
assert not math.isnan(real_runoff[i]), (i, available_ash[i], self.bulk_density)
# calculate runoff over the runoff_threshold specified by the model parameters
effective_runoff[i] = real_runoff[i] - self.runoff_threshold
# clamp to 0
if effective_runoff[i] < 0.0:
effective_runoff[i] = 0.0
if dff == 0:
ash_wat_cap[i] = effective_runoff[i] - soil_evap[i]
else:
ash_wat_cap[i] = ash_wat_cap[i - 1] + effective_runoff[i] - soil_evap[i]
if ash_wat_cap[i] < 0.0:
ash_wat_cap[i] = 0.0
# calculate cumulative runoff
if dff > 0:
cum_runoff[i] = cum_runoff[i-1] + effective_runoff[i-1]
# water transport is empirically modeled
# black and white ash have their own models
if self.ash_type == AshType.BLACK:
water_transport[i] = effective_runoff[i] * self.water_transport_rate
elif self.ash_type == AshType.WHITE:
# runoff_threshold == 0, so real_runoff == effective_runoff
water_transport[i] = effective_runoff[i] * self.water_transport_rate * \
math.exp(self.water_transport_rate_k * cum_runoff[i])
if water_transport[i] > 0:
if water_transport[i] > available_ash[i]:
water_transport[i] = available_ash[i]
available_ash[i] -= water_transport[i]
elif run_wind_transport: # only apply wind transport if there is no water
#
# model wind transport
#
# identify peak wind values within the fire year
if _row['w-vl'] > w_vl_if:
w_vl_if = _row['w-vl'] # store daily wind threshold
w_vl_ifgt[i] = w_vl_if # track max for comparison
else:
w_vl_ifgt[i] = 0.0 # if day is not a max for the year store 0.0
# identify the fraction removed by wind from the wind_transport_thresholds.csv
proportion_ash_transport[i] = self.lookup_wind_threshold_proportion(w_vl_ifgt[i])
assert proportion_ash_transport[i] >= 0.0
# if not the day of the fire adjust by the cumulative proportion of ash transport
if dff > 0:
proportion_ash_transport[i] -= cum_proportion_ash_transport[i-1]
# clamp to 0
if proportion_ash_transport[i] < 0.0:
proportion_ash_transport[i] = 0.0
# calculate cumulative ash transport
if dff == 0:
# on the day of the fire it is the value from the wind thresholds table
cum_proportion_ash_transport[i] = proportion_ash_transport[i]
else:
# on subsequent days sum up the values
cum_proportion_ash_transport[i] = cum_proportion_ash_transport[i-1] + proportion_ash_transport[i]
                # fraction of ash remaining after cumulative decay
relative_ash_decay = 1.0 - cum_relative_ash_decay[i]
# calculate wind transport
wind_transport[i] = (self.ini_material_available_tonneperha - relative_ash_decay) * \
(1.0 - daily_relative_ash_decay[i]) * proportion_ash_transport[i]
if wind_transport[i] < 0.0:
wind_transport[i] = 0.0
if wind_transport[i] > available_ash[i]:
wind_transport[i] = available_ash[i]
# remove wind and water transported ash from the available ash
available_ash[i] -= wind_transport[i]
# clamp to 0
if available_ash[i] < 0.0:
available_ash[i] = 0.0
cum_wind_transport[i] = wind_transport[i]
cum_water_transport[i] = water_transport[i]
if dff > 0:
cum_wind_transport[i] += cum_wind_transport[i-1]
cum_water_transport[i] += cum_water_transport[i-1]
# increment the days from fire variable
dff += 1
# calculate cumulative wind and water transport
ash_transport = water_transport + wind_transport
cum_ash_transport = cum_water_transport + cum_wind_transport
# store in the dataframe
df['fire_year (yr)'] = pd.Series(fire_years, index=df.index)
df['w_vl_ifgt (m/s)'] = pd.Series(w_vl_ifgt, index=df.index)
df['days_from_fire (days)'] = pd.Series(days_from_fire, index=df.index)
df['daily_relative_ash_decay (fraction)'] = pd.Series(daily_relative_ash_decay, index=df.index)
df['available_ash (tonne/ha)'] = pd.Series(available_ash, index=df.index)
df['_proportion_ash_transport (fraction)'] = pd.Series(proportion_ash_transport, index=df.index)
df['_cum_proportion_ash_transport (fraction)'] = pd.Series(cum_proportion_ash_transport, index=df.index)
df['wind_transport (tonne/ha)'] = pd.Series(wind_transport, index=df.index)
df['cum_wind_transport (tonne/ha)'] = pd.Series(cum_wind_transport, index=df.index)
df['peak_ro (mm/hr)'] = pd.Series(peak_ro, index=df.index)
df['eff_dur (hr)'] = pd.Series(eff_dur, index=df.index)
df['precip (mm)'] = pd.Series(precip, index=df.index)
df['water_excess (mm)'] = pd.Series(water_excess, index=df.index)
df['real_runoff (mm)'] = pd.Series(real_runoff, index=df.index)
df['effective_runoff (mm)'] = pd.Series(effective_runoff, index=df.index)
df['cum_runoff (mm)'] = pd.Series(cum_runoff, index=df.index)
df['water_transport (tonne/ha)'] = pd.Series(water_transport, index=df.index)
df['cum_water_transport (tonne/ha)'] = pd.Series(cum_water_transport, index=df.index)
df['ash_transport (tonne/ha)'] = pd.Series(ash_transport, index=df.index)
df['cum_ash_transport (tonne/ha)'] = pd.Series(cum_ash_transport, index=df.index)
if area_ha is not None:
df['ash_delivery (tonne)'] = pd.Series(ash_transport * area_ha, index=df.index)
df['ash_delivery_by_wind (tonne)'] = pd.Series(wind_transport * area_ha, index=df.index)
df['ash_delivery_by_water (tonne)'] = pd.Series(water_transport * area_ha, index=df.index)
df['cum_ash_delivery (tonne)'] = pd.Series(cum_ash_transport * area_ha, index=df.index)
df['cum_ash_delivery_by_wind (tonne)'] = | pd.Series(cum_wind_transport * area_ha, index=df.index) | pandas.Series |
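# --- Added illustration (not from the wepppy source above): a hedged numeric demo of the
# exponential ash-decay bookkeeping used in _neris_run_model. k=0.01 and the 10 tonne/ha
# initial load are made-up values.
def _ash_decay_demo(initial_load=10.0, k=0.01, days=5):
    available = initial_load
    cum = 0.0
    for t in range(1, days + 1):
        daily_fraction = k * math.exp(-k * t)  # daily_relative_ash_decay
        cum += daily_fraction                  # cum_relative_ash_decay
        available *= (1.0 - daily_fraction)    # available_ash update (no transport terms)
        print(f"day {t}: daily={daily_fraction:.5f} cum={cum:.5f} available={available:.4f}")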
import numpy as np
import pandas as pd
def generateBvc(minL,maxL,nov):
bvcArray = [[]]
csvTable= | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_preprocessing
----------------------------------
Tests for `preprocessing` module.
"""
import pytest
from sktutor.preprocessing import (GroupByImputer, MissingValueFiller,
OverMissingThresholdDropper,
ValueReplacer, FactorLimiter,
SingleValueAboveThresholdDropper,
SingleValueDropper, ColumnExtractor,
ColumnDropper, DummyCreator,
ColumnValidator, TextContainsDummyExtractor,
BitwiseOperator, BoxCoxTransformer,
InteractionCreator, StandardScaler,
PolynomialFeatures, ContinuousFeatureBinner,
TypeExtractor, GenericTransformer,
MissingColumnsReplacer)
from sktutor.pipeline import make_union
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from random import shuffle
from sklearn.pipeline import make_pipeline
@pytest.mark.usefixtures("missing_data")
@pytest.mark.usefixtures("missing_data2")
class TestGroupByImputer(object):
def test_groups_most_frequent(self, missing_data):
# Test imputing most frequent value per group.
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_mean(self, missing_data):
# Test imputing mean by group.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7 + 2/3, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_median(self, missing_data):
# Test imputing median by group.
prep = GroupByImputer('median', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 1.5, 4, 4, 4, 7, 9, 9, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_most_frequent(self, missing_data):
# Test imputing most frequent with no group by.
prep = GroupByImputer('most_frequent')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, 2, 4, 4, 7, 8, 2, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 4.0, 4.0, 4.0, 4.0, 7.0, 9.0, 4.0, 9.0],
'd': ['a', 'a', 'a', 'a', 'e', 'f', 'a', 'h', 'j', 'j'],
'e': [1, 2, 1, 1, 1, 1, 1, 1, 1, 1],
'f': ['a', 'b', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a'],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_mean(self, missing_data):
# Test imputing mean with no group by.
prep = GroupByImputer('mean')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 5, 5, 4, 4, 7, 8, 5, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 5, 4, 4, 4, 7, 9, 5, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_median(self, missing_data):
# Test imputing median with no group by.
prep = GroupByImputer('median')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 4, 4, 4, 4, 7, 8, 4, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 4, 4, 4, 4, 7, 9, 4, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_value_error(self, missing_data):
# Test limiting options without a group by.
prep = GroupByImputer('stdev')
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_key_error(self, missing_data):
# Test imputing with np.nan when a new group level is introduced in
# Transform.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
new_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
new_data = pd.DataFrame(new_dict)
# set equal to the expected for test means group
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7+2/3, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
result = prep.transform(new_data)
| tm.assert_frame_equal(result, expected, check_dtype=False) | pandas.util.testing.assert_frame_equal |
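# --- Added illustration (not one of the original tests): a hedged sketch of GroupByImputer on a
# toy frame; the toy data below is an assumption, mirroring the fit/transform pattern used above.
def _groupby_imputer_sketch():
    df = pd.DataFrame({"b": ["x", "x", "y", "y"], "c": [1.0, None, 3.0, None]})
    imputer = GroupByImputer("mean", "b")
    imputer.fit(df)
    return imputer.transform(df)  # missing 'c' values are filled with their group means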
#! /usr/bin/env python
"""
Contains classes and functions to compute stats for Navigator Solr servers
and compare across deployments
Since this is focused on Navigator analysis as of 2.6.0,
it assumes Solr 4.10 and may break against Solr 5+
"""
from solr_client import SolrServer, SolrCore, frange
import pandas as pd
import re
USAGE = """
Usage: solr_stats.py <config_file_path> <output_xlsx_path>"
The config file should contain the details of all the Navigator deployments you
wish to compare. Each deployment's config should be one a separate line in the
format:
name,host,port,user,password
e.g.,
customer1,foo.cloudera.com,1234,user,password
customer2,bar.cloudera.com,1234,user,password
"""
PATH_PATTEN = re.compile('(?:hdfs://[^/]*)(.*)')
class NavSolrServer(SolrServer):
"""
Navigator Solr server is a SolrServer with convenience properties
for 'nav_elements' and 'nav_relations', Navigator's 2 Solr cores.
"""
@property
def nav_elements(self):
return self.get_core('nav_elements')
@property
def nav_relations(self):
return NavRelations(self, 'nav_relations')
def get_core(self, core):
if core == 'nav_relations':
return NavRelations(self, core)
elif core == 'nav_elements':
return NavCore(self, core)
return super(NavSolrServer, self).get_core(core)
class NavCore(SolrCore):
def get_docs(self, q='*:*', fq='', sort=None, wt='json',
rows=None, indent='true', batch_size=100000, params=None):
if rows is None and sort is None:
sort = 'identity asc'
return super(NavCore, self).get_docs(q, fq, sort, wt, rows, indent,
batch_size, params)
def find_by_id(self, ids, fl=None):
for part in partition(ids):
for r in self.get_docs(fq=terms(part, self.schema.identity),
params={'fl': fl}):
yield r
def partition(lst, size=50*1024):
for i in range(0, len(lst), size):
yield lst[i:i+size]
def terms(ids, field):
return '{{!terms f={}}}{}'.format(field, ','.join(ids))
class NavRelations(NavCore):
def __init__(self, server, core):
super(NavRelations, self).__init__(server, core)
assert isinstance(server, NavSolrServer)
def get_ep1_ids(self, relation_query):
return self.get_endpoint_ids(relation_query, 'endpoint1Ids')
def get_ep2_ids(self, relation_query, limit=None):
return self.get_endpoint_ids(relation_query, 'endpoint2Ids')
def get_endpoint_ids(self, relation_query, endpoint):
ids = []
for d in self.get_docs(relation_query,
params={'fl': ['identity', endpoint]}):
ids.extend(d[endpoint])
return ids
class NavSolrAnalyzer(object):
"""
Class to get server level summary stats across cores, HDFS stats,
and a breakdown of entity counts
"""
def __init__(self, name, host, port, user, pwd, use_tls=False, verify=None):
self.name = name
self.server = NavSolrServer(host, port, user, pwd, use_tls=use_tls,
verify=verify)
self.nav_elements = self.server.nav_elements
def summary_stats(self):
"""
# docs and size for each core in the given SolrServer
"""
resp = self.server.core_admin_status()
core_names = []
stats = ['indexHeapUsageBytes', 'numDocs', 'size']
data = []
for core, status in resp['status'].items():
core_names.append(core)
data.append([status['index'][field] for field in stats])
return pd.DataFrame(data, index=pd.Index(core_names, name='core'),
columns=pd.Index(stats, name='stats')).T
def hdfs_stats(self):
"""
Get HDFS file size summary statistics (non-deleted only)
"""
return self.nav_elements.stats(
'size', fq='sourceType:HDFS AND type:FILE AND -deleted:true',
stats=['max', 'sum', 'mean', 'stddev'])
def count_breakdown(self):
"""
Get entity counts by source type (HDFS, Yarn, etc) and entity type
(File, Operation, etc) for non-deleted entities only
"""
return self.nav_elements.pivot(fields=['sourceType', 'type'],
fq='-deleted:true')
def deleted_stats(self, fq='sourceType:HDFS'):
fq += ('' if fq is None or fq == '' else ' AND ') + 'deleted:true'
delete_time = self.nav_elements.stats(
'deleteTime',
fq=fq + 'AND {!frange l=0 incl=false}deleteTime',
stats=['max', 'min'])
min_date = int(delete_time.ix['min', 0])
max_date = int(delete_time.ix['max', 0])
day_in_millis = 1000 * 60 * 60 * 24
break_points = [0, 1, 7, 30, 90, 365, 730]
labels = ['1 day', '1 week', '1 month', '3 months', '1 year', '2 years',
'invalid deleteTime']
queries = []
for i in range(len(break_points)):
u = max_date - break_points[i] * day_in_millis
if i < len(break_points) - 1:
l = max(min_date, max_date - break_points[i+1] * day_in_millis)
else:
l = min_date
queries.append(self._make_deleteTime_query(l, u, label=labels[i]))
if l == min_date:
break
queries.append(self._make_deleteTime_query(None, min_date, incl=True,
label='invalid deleteTime'))
rs = self.nav_elements.facet_query(queries, fq=fq)
rs = rs.reindex(labels).dropna().astype('int64')
rs.name = 'Deleted'
rs.index.name = 'Date Range'
return rs.to_frame()
def create_stats(self, start='NOW-1YEAR', end='NOW', gap='+1MONTH',
fq='sourceType:HDFS'):
fq += (('' if fq is None or fq == '' else ' AND ') +
'created:[* TO *] AND -deleted:true')
ser = self.nav_elements.facet_range('created', start, end, gap, fq=fq)
ser.index.name = 'Date'
ser.name = 'Created'
return ser.to_frame()
def top_partitions(self, n=10):
fq = 'type:PARTITION AND sourceType:HIVE AND -deleted:true'
df = self.nav_elements.facet_field('parentPath', fq=fq,
limit=n)
df = df.rename(columns={'parentPath':'partition_count'})
if n > 0:
df = df[:n]
self._add_hdfs_subdir_counts(df)
return df
def _add_hdfs_subdir_counts(self, df):
dbs = []
tables = []
for parent_path in df.index:
db, table = tuple([x for x in parent_path.split('/')
if len(x) > 0])
dbs.append(db)
tables.append(table)
df['Database'] = dbs
df['Table'] = tables
df['hdfs_subdir_count'] = self._get_hdfs_subdir_count(dbs, tables)
return df.reset_index()
def _get_hdfs_subdir_count(self, db_names, table_names):
counts = []
for i in range(0, len(db_names), 30):
counts.extend(self._subdir_helper(db_names[i:i+30],
table_names[i:i+30]))
return counts
def _subdir_helper(self, db_names, table_names):
clause_base = '(parentPath:\/{} AND originalName:{})'
lst = []
for db_name, table_name in zip(db_names, table_names):
lst.append(clause_base.format(db_name, table_name))
table_query = ('sourceType:HIVE AND type:TABLE AND (' +
' OR '.join(lst) + ')')
tables = self.nav_elements.get_docs(
fq=table_query, rows=len(db_names))
count_map = {} # (db, tbl) -> count
for t in tables:
key = (t['parentPath'][1:], t['originalName'])
path = _get_file_system_path(t['fileSystemPath'])
query = ('sourceType:HDFS AND type:DIRECTORY AND '
'-fileSystemPath:\{0}/*.hive-staging* AND '
'fileSystemPath:\{0}/*').format(path)
count_map[key] = self.nav_elements.get_count(query)
counts = []
for db_name, table_name in zip(db_names, table_names):
counts.append(count_map[(db_name, table_name)])
return counts
def _make_deleteTime_query(self, l, u, incl=False, incu=True,
label=None):
return frange('deleteTime', l, u, incl, incu, label)
def _get_file_system_path(full_path):
"""
hdfs://Enchilada/path -> path
"""
matches = PATH_PATTEN.match(full_path)
return matches.group(1)
class NavSolrComparator(object):
def __init__(self, analyzers):
self.analyzers = analyzers
def summary_stats(self):
return self._compare('summary_stats')
def hdfs_stats(self):
return self._compare('hdfs_stats').reorder_levels([1,0], 1)
def count_breakdown(self):
return self._compare('count_breakdown').reorder_levels([1,0], 1)
def deleted_stats(self, fq=None):
return self._compare('deleted_stats', fq=fq)
def create_stats(self, fq=None):
return self._compare('create_stats', fq=fq)
def _compare(self, meth, *args, **kwds):
args = list(zip(*[(getattr(a, meth)(*args, **kwds), a.name)
for a in self.analyzers]))
return self._concat(args[0], args[1])
def _concat(self, lst, names):
return pd.concat(lst, axis=1, keys=names)
def to_excel(comparator, path):
writer = | pd.ExcelWriter(path) | pandas.ExcelWriter |
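# --- Added illustration (not part of the original script): a hedged end-to-end sketch mirroring
# the USAGE text above. Host names, credentials and the output path are placeholders.
def _example_compare_deployments():
    config_lines = [
        "customer1,foo.cloudera.com,1234,user,password",
        "customer2,bar.cloudera.com,1234,user,password",
    ]
    analyzers = []
    for line in config_lines:
        name, host, port, user, pwd = line.strip().split(",")
        analyzers.append(NavSolrAnalyzer(name, host, port, user, pwd))
    comparator = NavSolrComparator(analyzers)
    to_excel(comparator, "nav_solr_stats.xlsx")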
import os
import pandas as pd
dir_datos_abiertos = os.path.join(os.pardir, 'datos_abiertos', '')
dir_series = os.path.join(dir_datos_abiertos, 'series_de_tiempo', 'nuevos', '')
dir_formato = os.path.join(dir_datos_abiertos, 'formato_especial', '')
pos = ( | pd.read_csv(dir_series + 'covid19_mex_confirmados.csv') | pandas.read_csv |
import re
import hashlib
import numpy as np
import pdb
import pprint
import uuid
import os
from pathlib import Path
import pytest
import pandas as pd
from .config import TEST_DIR, TEST_H5, IRB_DIR, GET_IRB_MKDIG, irb_data, mkpy
from mkpy import mkh5
@pytest.mark.parametrize("path_type", [str, Path])
@irb_data
def test_irb_load_code_map_files(path_type):
# h5f = IRB_DIR / "mkh5" / (uuid.uuid4().hex + ".h5")
h5group = "test2"
h5f = IRB_DIR / "mkh5" / (h5group + "_test_load_codemap.h5")
mydat = mkh5.mkh5(h5f)
mydat.reset_all() # start fresh
mydat.create_mkdata(h5group, *GET_IRB_MKDIG(h5group))
# load code mappers in different formats as Path and str
cm_ytbl = mkh5.CodeTagger(path_type(TEST_DIR("data/design2.ytbl")))
cm_txt = mkh5.CodeTagger(path_type(TEST_DIR("data/design2.txt")))
cm_xlsx = mkh5.CodeTagger(path_type(TEST_DIR("data/design2.xlsx")))
cm_xlsx_named_sheet = mkh5.CodeTagger(
path_type(TEST_DIR("data/design2.xlsx!code_map"))
)
# check for identity ... NB: nan == nan evaluates to False
cms = [cm_ytbl, cm_txt, cm_xlsx, cm_xlsx_named_sheet]
ncms = len(cms)
for i, cm1 in enumerate(cms):
for cm2 in cms[i + 1 :]:
print("-" * 40)
print("# ", cm1.cmf)
print(cm1.code_map)
for c in cm1.code_map.columns:
print(c, cm1.code_map[c].dtype)
print("# ", cm2.cmf)
print(cm2.code_map)
for c in cm1.code_map.columns:
print(c, cm1.code_map[c].dtype)
same = cm1.code_map == cm2.code_map
# print(same)
diffs = np.where(same == False)
for r in range(len(diffs[0])):
idx = diffs[0][r]
jdx = diffs[1][r]
print(
"{0}[{1},{2}] --> {3}".format(
cm1.cmf, idx, jdx, repr(cm1.code_map.iat[idx, jdx])
)
)
print(
"{0}[{1},{2}] <-- {3}".format(
cm2.cmf, idx, jdx, repr(cm2.code_map.iat[idx, jdx])
)
)
print()
os.remove(h5f)
@irb_data
def test_irb_event_table():
subid = "test2"
h5f = IRB_DIR / "mkh5" / (subid + "_event_table.h5")
mydat = mkh5.mkh5(h5f)
mydat.reset_all() # start fresh
mydat.create_mkdata(subid, *GET_IRB_MKDIG(subid))
# mydat.create_mkdata('sub02', eeg_f, log_f, yhdr_f)
# mydat.create_mkdata('sub03', eeg_f, log_f, yhdr_f)
# sample code sequence pattern matches
code_map_f = TEST_DIR("data/test2_items.xlsx!code_table")
# Excel header slicers DEPRECATED
# header_chooser_f = 'data/test2_items.xlsx!header_chooser'
header_chooser_f = TEST_DIR("data/test2.yhdx")
print("get_event_table() *WITHOUT* header extraction")
event_table = mydat.get_event_table(code_map_f)
print("get_event_table() *WITH* header extraction")
event_table = mydat.get_event_table(code_map_f, header_chooser_f)
# pprint.pprint(event_table)
# test export event
print("exporting event table")
mydat.export_event_table(
event_table, TEST_DIR("data/test_event_table.fthr"), format="feather"
)
mydat.export_event_table(
event_table, TEST_DIR("data/test_event_table.txt"), format="txt"
)
# clean up
os.remove(h5f)
@irb_data
def test_irb_event_table_b():
subid = "test2"
h5f = IRB_DIR / "mkh5" / (subid + "_event_table.h5")
mydat = mkh5.mkh5(h5f)
mydat.reset_all() # start fresh
mydat.create_mkdata(subid, *GET_IRB_MKDIG(subid))
# sample code sequence pattern matches
code_map_f = TEST_DIR("data/test2b.ytbl")
# Excel header slicers DEPRECATED
# header_chooser_f = 'data/test2_items.xlsx!header_chooser'
header_chooser_f = TEST_DIR("data/test2.yhdx")
print("get_event_table() *WITHOUT* header extraction")
event_table = mydat.get_event_table(code_map_f)
print("get_event_table() *WITH* header extraction")
event_table = mydat.get_event_table(code_map_f, header_chooser_f)
# pprint.pprint(event_table)
# test export event
print("exporting event table")
mydat.export_event_table(event_table, TEST_DIR("data/test_event_table_b.fthr"))
mydat.export_event_table(
event_table, TEST_DIR("data/test_event_table_b.txt"), format="txt"
)
# clean up
os.remove(h5f)
@irb_data
def test_irb_event_table_fails():
subid = "test2"
h5f = IRB_DIR / "mkh5" / (subid + "_event_table.h5")
mydat = mkh5.mkh5(h5f)
mydat.reset_all() # start fresh
mydat.create_mkdata(subid, *GET_IRB_MKDIG(subid))
# design4.ytbl is a well-formed code mapper file but no code
# matches in test2.h5
try:
event_table = mydat.get_event_table(TEST_DIR("data/design4.ytbl"))
except Exception as err:
# hard coded error message in mkh5.get_event_table
if isinstance(err, RuntimeError) and "no events found" in str(err):
print("Caught the no matching codes RuntimeError")
else:
msg = (
"\nNo codes match the pattern so the event table is empty, "
"expected a RuntimeError instead of this:\n"
)
raise RuntimeError(msg + str(err))
os.remove(h5f)
@pytest.mark.parametrize("sheet", ["Sheet1", "Sheet2"])
def test_non_unique_event_table_index(sheet):
"""test xlsx codemap with non-unique index values, with and without ccode"""
# name and reset the .h5 file
sid = "sub000"
eeg_f = TEST_DIR("data/sub000wr.crw")
log_f = TEST_DIR("data/sub000wr.log")
yhdr_f = TEST_DIR("data/sub000wr.yhdr")
yhdx_f = TEST_DIR("data/wr.yhdx")
cal_eeg_f = TEST_DIR("data/sub000c.crw")
cal_log_f = TEST_DIR("data/sub000c.log")
cal_yhdr_f = TEST_DIR("data/sub000c.yhdr")
myh5 = mkh5.mkh5(TEST_H5)
myh5.reset_all()
# load in subject and cals
myh5.create_mkdata(sid, eeg_f, log_f, yhdr_f)
myh5.append_mkdata(sid, cal_eeg_f, cal_log_f, cal_yhdr_f)
# calibrate data
pts, pulse, lo, hi, ccode = 5, 10, -40, 40, 0
myh5.calibrate_mkdata(
sid, # specific data group
n_points=pts, # pts to average
cal_size=pulse, # uV
lo_cursor=lo, # lo_cursor ms
hi_cursor=hi, # hi_cursor ms
cal_ccode=ccode,
) # condition code
# -----------------------------------------------------
# check the event code table specs
# -----------------------------------------------------
code_map_f = TEST_DIR(f"data/wr_code_map.xlsx!{sheet}")
event_table = myh5.get_event_table(code_map_f, yhdx_f)
# use column Index to index the frame for these tests
event_table.set_index("Index", inplace=True)
# events: 209 cals, 144 block 1 words, 144 block 2 words
events = {
"Sheet1": {
"shape": (288, 34), # no ccode column
"idx_n": 144, # no cals, 144 unique words
"word_lags_1": ["unique", "none"],
"word_lags_2_3": ["short", "long"],
},
# Sheet2 has ccode column for bdf compatibility test
"Sheet2": {
"shape": (497, 35), # 34 + ccode column
"idx_n": 145,
"ccodes_n": (209, 0, 144, 144), # ccode 0, 1, 2, 3
"word_lags_1": ["cal", "unique", "none"],
"word_lags_2_3": ["cal", "short", "long"],
},
}
idxs = event_table.index.unique()
assert event_table.shape == events[sheet]["shape"]
assert len(idxs) == events[sheet]["idx_n"]
# count the ccodes, if any
if "ccodes_n" in events[sheet].keys():
for code, n_codes in enumerate(events[sheet]["ccodes_n"]):
assert len(event_table.query("ccode == @code")) == n_codes
for idx in idxs:
row_slice = event_table.loc[idx]
# widen long format series to 1-row dataframe
if isinstance(row_slice, pd.Series):
row_slice = pd.DataFrame(row_slice).T
# check the anchor
for idx, row in row_slice.iterrows():
assert row["anchor_code"] == row["log_evcodes"]
# spot check the word_lag rows
if len(row_slice) == 1:
assert row_slice["word_lag"].unique() in events[sheet]["word_lags_1"]
elif len(row_slice) in [2, 3]:
# duplicate indices, multiple rows match, make sure the
# the right values come back
assert row_slice["word_lag"].unique() in events[sheet]["word_lags_2_3"]
elif len(row_slice) == 209:
assert row_slice["word_lag"].unique()[0] == "cal"
else:
print("{0}".format(row_slice))
raise ValueError("something wrong with word rep event table")
os.remove(TEST_H5)
@pytest.mark.parametrize("codemap", ["no_ccode", "with_ccode"])
def test_p3_yaml_codemap_ccode(codemap):
"""test YAML codemaps with and without ccode"""
# name and reset the .h5 file
sid = "sub000"
eeg_f = TEST_DIR("data/sub000p3.crw")
log_f = TEST_DIR("data/sub000p3.x.log")
yhdr_f = TEST_DIR("data/sub000p3.yhdr")
# yhdx_f = TEST_DIR("data/wr.yhdx")
cal_eeg_f = TEST_DIR("data/sub000c.crw")
cal_log_f = TEST_DIR("data/sub000c.log")
cal_yhdr_f = TEST_DIR("data/sub000c.yhdr")
myh5 = mkh5.mkh5(TEST_H5)
myh5.reset_all()
# load in subject and cals
myh5.create_mkdata(sid, eeg_f, log_f, yhdr_f)
myh5.append_mkdata(sid, cal_eeg_f, cal_log_f, cal_yhdr_f)
# calibrate data
pts, pulse, lo, hi, ccode = 5, 10, -40, 40, 0
myh5.calibrate_mkdata(
sid, # specific data group
n_points=pts, # pts to average
cal_size=pulse, # uV
lo_cursor=lo, # lo_cursor ms
hi_cursor=hi, # hi_cursor ms
cal_ccode=ccode,
) # condition code
# ------------------------------------------------------------
# Fetch and check events for codemaps with and without ccode
# ------------------------------------------------------------
events = {
"no_ccode": {
"event_shape": (492, 30),
"ytbl": TEST_DIR("data/sub000p3_codemap.ytbl"),
"bindesc_f": TEST_DIR("data/sub000p3_bindesc.txt"),
"sha256": "8a8a156ccc532a5b8b9a3b606ba628fab2f3fc9f04bbb2e115c9206c42def9ba",
},
"with_ccode": {
"event_shape": (701, 30),
"ytbl": TEST_DIR("data/sub000p3_codemap_ccode.ytbl"),
"bindesc_f": TEST_DIR("data/sub000p3_ccode_bindesc.txt"),
"sha256": "fddb3e8d02f90fc3ab68383cfa8996d55fc342d2151e17d62e80bf10874ea4b7",
},
}
ytbl = events[codemap]["ytbl"]
event_table = myh5.get_event_table(ytbl).query("is_anchor == True")
assert event_table.shape == events[codemap]["event_shape"]
print(f"{ytbl} event_table {event_table.shape}")
counts = pd.crosstab(event_table.bin, [event_table.log_flags > 0], margins=True)
counts.columns = [str(col) for col in counts.columns]
coi = ["regexp", "bin", "tone", "stim", "accuracy", "acc_type"]
bin_desc = (
event_table[coi]
.drop_duplicates()
.sort_values("bin")
.join(counts, on="bin")
.reset_index()
)
bindesc_f = events[codemap]["bindesc_f"]
# use this to rebuild the gold standard file in event of a change
# bin_desc.to_csv(events[codemap]["bindesc_f"], sep="\t", index=False)
with open(bindesc_f, "rb") as bd:
sha256 = hashlib.sha256(bd.read()).hexdigest()
assert sha256 == events[codemap]["sha256"]
assert all(bin_desc == | pd.read_csv(bindesc_f, sep="\t") | pandas.read_csv |
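# --- Added illustration (not one of the test cases): a hedged minimal mkh5 event-table workflow
# mirroring the fixtures above; every file name here is a placeholder.
def _event_table_sketch(h5_path="scratch.h5"):
    h5 = mkh5.mkh5(h5_path)
    h5.reset_all()
    h5.create_mkdata("sub000", "sub000.crw", "sub000.log", "sub000.yhdr")
    return h5.get_event_table("codemap.ytbl", "sub000.yhdx")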
import datetime
import string
from collections import namedtuple
from distutils.version import LooseVersion
from random import choices
from typing import Optional, Type
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from pandas.tests.extension.base import (
BaseArithmeticOpsTests,
BaseBooleanReduceTests,
BaseCastingTests,
BaseComparisonOpsTests,
BaseConstructorsTests,
BaseDtypeTests,
BaseGetitemTests,
BaseGroupbyTests,
BaseInterfaceTests,
BaseMethodsTests,
BaseMissingTests,
BaseNoReduceTests,
BaseNumericReduceTests,
BaseParsingTests,
BasePrintingTests,
BaseReshapingTests,
BaseSetitemTests,
)
from fletcher import FletcherBaseDtype
if LooseVersion(pd.__version__) >= "0.25.0":
# imports of pytest fixtures needed for derived unittest classes
from pandas.tests.extension.conftest import ( # noqa: F401
as_array, # noqa: F401
use_numpy, # noqa: F401
groupby_apply_op, # noqa: F401
as_frame, # noqa: F401
as_series, # noqa: F401
)
PANDAS_GE_1_1_0 = LooseVersion(pd.__version__) >= "1.1.0"
FletcherTestType = namedtuple(
"FletcherTestType",
[
"dtype",
"data",
"data_missing",
"data_for_grouping",
"data_for_sorting",
"data_missing_for_sorting",
"data_repeated",
],
)
def is_arithmetic_type(arrow_dtype: pa.DataType) -> bool:
"""Check whether this is a type that support arithmetics."""
return (
pa.types.is_integer(arrow_dtype)
or pa.types.is_floating(arrow_dtype)
or pa.types.is_decimal(arrow_dtype)
)
skip_non_artithmetic_type = pytest.mark.skip_by_type_filter(
[lambda x: not is_arithmetic_type(x)]
)
xfail_list_scalar_constuctor_not_implemented = pytest.mark.xfail_by_type_filter(
[pa.types.is_list], "constructor from scalars is not implemented for lists"
)
xfail_list_equals_not_implemented = pytest.mark.xfail_by_type_filter(
[pa.types.is_list], "== is not implemented for lists"
)
xfail_list_setitem_not_implemented = pytest.mark.xfail_by_type_filter(
[pa.types.is_list], "__setitem__ is not implemented for lists"
)
xfail_missing_list_dict_encode = pytest.mark.xfail_by_type_filter(
[pa.types.is_list],
"ArrowNotImplementedError: dictionary-encode not implemented for list<item: string>",
)
xfail_bool_too_few_uniques = pytest.mark.xfail_by_type_filter(
[pa.types.is_boolean], "Test requires at least 3 unique values"
)
test_types = [
FletcherTestType(
pa.string(),
["🙈", "Ö", "Č", "a", "B"] * 20,
[None, "A"],
["B", "B", None, None, "A", "A", "B", "C"],
["B", "C", "A"],
["B", None, "A"],
lambda: choices(list(string.ascii_letters), k=10),
),
FletcherTestType(
pa.bool_(),
[True, False, True, True, False] * 20,
[None, False],
[True, True, None, None, False, False, True, False],
[True, False, False],
[True, None, False],
lambda: choices([True, False], k=10),
),
FletcherTestType(
pa.int8(),
# Use small values here so that np.prod stays in int32
[2, 1, 1, 2, 1] * 20,
[None, 1],
[2, 2, None, None, -100, -100, 2, 100],
[2, 100, -10],
[2, None, -10],
lambda: choices(list(range(100)), k=10),
),
FletcherTestType(
pa.int16(),
# Use small values here so that np.prod stays in int32
[2, 1, 3, 2, 1] * 20,
[None, 1],
[2, 2, None, None, -100, -100, 2, 100],
[2, 100, -10],
[2, None, -10],
lambda: choices(list(range(100)), k=10),
),
FletcherTestType(
pa.int32(),
# Use small values here so that np.prod stays in int32
[2, 1, 3, 2, 1] * 20,
[None, 1],
[2, 2, None, None, -100, -100, 2, 100],
[2, 100, -10],
[2, None, -10],
lambda: choices(list(range(100)), k=10),
),
FletcherTestType(
pa.int64(),
# Use small values here so that np.prod stays in int64
[2, 1, 3, 2, 1] * 20,
[None, 1],
[2, 2, None, None, -100, -100, 2, 100],
[2, 100, -10],
[2, None, -10],
lambda: choices(list(range(100)), k=10),
),
FletcherTestType(
pa.float64(),
[2, 1.0, 1.0, 5.5, 6.6] * 20,
[None, 1.1],
[2.5, 2.5, None, None, -100.1, -100.1, 2.5, 100.1],
[2.5, 100.99, -10.1],
[2.5, None, -10.1],
lambda: choices([2.5, 1.0, -1.0, 0, 66.6], k=10),
),
# Most of the tests fail as assert_extension_array_equal casts to numpy object
# arrays and on them equality is not defined.
pytest.param(
FletcherTestType(
pa.list_(pa.string()),
[["B", "C"], ["A"], [None], ["A", "A"], []] * 20,
[None, ["A"]],
[["B"], ["B"], None, None, ["A"], ["A"], ["B"], ["C"]],
[["B"], ["C"], ["A"]],
[["B"], None, ["A"]],
lambda: choices([["B", "C"], ["A"], [None], ["A", "A"]], k=10),
)
),
FletcherTestType(
pa.date64(),
[
datetime.date(2015, 1, 1),
datetime.date(2010, 12, 31),
datetime.date(1970, 1, 1),
datetime.date(1900, 3, 31),
datetime.date(1999, 12, 31),
]
* 20,
[None, datetime.date(2015, 1, 1)],
[
datetime.date(2015, 2, 2),
datetime.date(2015, 2, 2),
None,
None,
datetime.date(2015, 1, 1),
datetime.date(2015, 1, 1),
datetime.date(2015, 2, 2),
datetime.date(2015, 3, 3),
],
[
datetime.date(2015, 2, 2),
datetime.date(2015, 3, 3),
datetime.date(2015, 1, 1),
],
[datetime.date(2015, 2, 2), None, datetime.date(2015, 1, 1)],
lambda: choices(list(pd.date_range("2010-1-1", "2011-1-1").date), k=10),
),
]
@pytest.fixture(params=[True, False])
def box_in_series(request):
"""Whether to box the data in a Series."""
return request.param
@pytest.fixture(params=test_types)
def fletcher_type(request):
return request.param
@pytest.fixture(autouse=True)
def skip_by_type_filter(request, fletcher_type):
if request.node.get_closest_marker("skip_by_type_filter"):
for marker in request.node.iter_markers("skip_by_type_filter"):
for func in marker.args[0]:
if func(fletcher_type.dtype):
pytest.skip(f"skipped for type: {fletcher_type}")
@pytest.fixture(autouse=True)
def xfail_by_type_filter(request, fletcher_type):
if request.node.get_closest_marker("xfail_by_type_filter"):
for marker in request.node.iter_markers("xfail_by_type_filter"):
for func in marker.args[0]:
if func(fletcher_type.dtype):
pytest.xfail(f"XFAIL for type: {fletcher_type}")
@pytest.fixture
def dtype(fletcher_type, fletcher_dtype):
return fletcher_dtype(fletcher_type.dtype)
@pytest.fixture
def data(fletcher_type, fletcher_array):
return fletcher_array(fletcher_type.data, dtype=fletcher_type.dtype)
@pytest.fixture
def data_for_twos(dtype, fletcher_type, fletcher_array):
if dtype._is_numeric:
return fletcher_array([2] * 100, dtype=fletcher_type.dtype)
else:
return None
@pytest.fixture
def data_missing(fletcher_type, fletcher_array):
return fletcher_array(fletcher_type.data_missing, dtype=fletcher_type.dtype)
@pytest.fixture
def data_repeated(fletcher_type, fletcher_array):
"""Return different versions of data for count times."""
pass # noqa
def gen(count):
for _ in range(count):
yield fletcher_array(
fletcher_type.data_repeated(), dtype=fletcher_type.dtype
)
yield gen
@pytest.fixture
def data_for_grouping(fletcher_type, fletcher_array):
"""Fixture with data for factorization, grouping, and unique tests.
Expected to be like [B, B, NA, NA, A, A, B, C]
Where A < B < C and NA is missing
"""
return fletcher_array(fletcher_type.data_for_grouping, dtype=fletcher_type.dtype)
@pytest.fixture
def data_for_sorting(fletcher_type, fletcher_array):
"""Length-3 array with a known sort order.
This should be three items [B, C, A] with
A < B < C
"""
return fletcher_array(fletcher_type.data_for_sorting, dtype=fletcher_type.dtype)
@pytest.fixture
def data_missing_for_sorting(fletcher_type, fletcher_array):
"""Length-3 array with a known sort order.
This should be three items [B, NA, A] with
A < B and NA missing.
"""
return fletcher_array(
fletcher_type.data_missing_for_sorting, dtype=fletcher_type.dtype
)
@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
"""
Return a simple fixture for festing keys in sorting methods.
Tests None (no key) and the identity key.
"""
return request.param
class TestBaseCasting(BaseCastingTests):
pass
class TestBaseConstructors(BaseConstructorsTests):
def test_from_dtype(self, data):
if pa.types.is_string(data.dtype.arrow_dtype):
pytest.xfail(
"String construction is failing as Pandas wants to pass the FletcherChunkedDtype to NumPy"
)
BaseConstructorsTests.test_from_dtype(self, data)
@xfail_list_scalar_constuctor_not_implemented
def test_series_constructor_scalar_with_index(self, data, dtype):
if PANDAS_GE_1_1_0:
BaseConstructorsTests.test_series_constructor_scalar_with_index(
self, data, dtype
)
class TestBaseDtype(BaseDtypeTests):
pass
class TestBaseGetitemTests(BaseGetitemTests):
def test_loc_iloc_frame_single_dtype(self, data):
if pa.types.is_string(data.dtype.arrow_dtype):
            pytest.xfail(
                reason="https://github.com/pandas-dev/pandas/issues/27673"
            )
else:
BaseGetitemTests.test_loc_iloc_frame_single_dtype(self, data)
class TestBaseGroupbyTests(BaseGroupbyTests):
@xfail_bool_too_few_uniques
@xfail_missing_list_dict_encode
@pytest.mark.parametrize("as_index", [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
BaseGroupbyTests.test_groupby_extension_agg(self, as_index, data_for_grouping)
@xfail_bool_too_few_uniques
@xfail_missing_list_dict_encode
def test_groupby_extension_no_sort(self, data_for_grouping):
BaseGroupbyTests.test_groupby_extension_no_sort(self, data_for_grouping)
@xfail_missing_list_dict_encode
def test_groupby_extension_transform(self, data_for_grouping):
if pa.types.is_boolean(data_for_grouping.dtype.arrow_dtype):
valid = data_for_grouping[~data_for_grouping.isna()]
df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4], "B": valid})
result = df.groupby("B").A.transform(len)
# Expected grouping is different as we only have two non-null values
expected = pd.Series([3, 3, 3, 3, 3, 3], name="A")
self.assert_series_equal(result, expected)
else:
BaseGroupbyTests.test_groupby_extension_transform(self, data_for_grouping)
@xfail_missing_list_dict_encode
def test_groupby_extension_apply(
self, data_for_grouping, groupby_apply_op # noqa: F811
):
BaseGroupbyTests.test_groupby_extension_apply(
self, data_for_grouping, groupby_apply_op
)
class TestBaseInterfaceTests(BaseInterfaceTests):
@pytest.mark.xfail(
reason="view or self[:] returns a shallow copy in-place edits are not backpropagated"
)
def test_view(self, data):
BaseInterfaceTests.test_view(self, data)
def test_array_interface(self, data):
if pa.types.is_list(data.dtype.arrow_dtype):
pytest.xfail("Not sure whether this test really holds for list")
else:
BaseInterfaceTests.test_array_interface(self, data)
@xfail_list_setitem_not_implemented
def test_copy(self, data):
        BaseInterfaceTests.test_copy(self, data)
class TestBaseMethodsTests(BaseMethodsTests):
# https://github.com/pandas-dev/pandas/issues/22843
@pytest.mark.skip(reason="Incorrect expected")
@pytest.mark.parametrize("dropna", [True, False])
def test_value_counts(self, all_data, dropna, dtype):
pass
@xfail_list_equals_not_implemented
@pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])
def test_equals(self, data, na_value, as_series, box): # noqa: F811
if PANDAS_GE_1_1_0:
BaseMethodsTests.test_equals(self, data, na_value, as_series, box)
@xfail_missing_list_dict_encode
def test_value_counts_with_normalize(self, data):
if PANDAS_GE_1_1_0:
BaseMethodsTests.test_value_counts_with_normalize(self, data)
def test_combine_le(self, data_repeated):
# GH 20825
# Test that combine works when doing a <= (le) comparison
# Fletcher returns 'fletcher_chunked[bool]' instead of np.bool as dtype
orig_data1, orig_data2 = data_repeated(2)
if pa.types.is_list(orig_data1.dtype.arrow_dtype):
return pytest.skip("__le__ not implemented for list scalars with None")
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 <= x2)
expected = pd.Series(
orig_data1._from_sequence(
[a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))]
)
)
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 <= x2)
expected = pd.Series(
orig_data1._from_sequence([a <= val for a in list(orig_data1)])
)
self.assert_series_equal(result, expected)
def test_combine_add(self, data_repeated, dtype):
if dtype.name in [
"fletcher_chunked[date64[ms]]",
"fletcher_continuous[date64[ms]]",
]:
pytest.skip(
"unsupported operand type(s) for +: 'datetime.date' and 'datetime.date"
)
else:
BaseMethodsTests.test_combine_add(self, data_repeated)
@xfail_bool_too_few_uniques
def test_argsort(self, data_for_sorting):
BaseMethodsTests.test_argsort(self, data_for_sorting)
@xfail_bool_too_few_uniques
def test_argmin_argmax(self, data_for_sorting, data_missing_for_sorting, na_value):
if PANDAS_GE_1_1_0:
BaseMethodsTests.test_argmin_argmax(
self, data_for_sorting, data_missing_for_sorting, na_value
)
else:
pass
@pytest.mark.parametrize("ascending", [True, False])
@xfail_bool_too_few_uniques
def test_sort_values(self, data_for_sorting, ascending, sort_by_key):
if PANDAS_GE_1_1_0:
BaseMethodsTests.test_sort_values(
self, data_for_sorting, ascending, sort_by_key
)
else:
BaseMethodsTests.test_sort_values(self, data_for_sorting, ascending)
@pytest.mark.parametrize("na_sentinel", [-1, -2])
@xfail_bool_too_few_uniques
@xfail_missing_list_dict_encode
def test_factorize(self, data_for_grouping, na_sentinel):
BaseMethodsTests.test_factorize(self, data_for_grouping, na_sentinel)
@pytest.mark.parametrize("na_sentinel", [-1, -2])
@xfail_bool_too_few_uniques
@xfail_missing_list_dict_encode
def test_factorize_equivalence(self, data_for_grouping, na_sentinel):
BaseMethodsTests.test_factorize_equivalence(
self, data_for_grouping, na_sentinel
)
@pytest.mark.parametrize("ascending", [True, False])
@xfail_missing_list_dict_encode
def test_sort_values_frame(self, data_for_sorting, ascending):
BaseMethodsTests.test_sort_values_frame(self, data_for_sorting, ascending)
@xfail_bool_too_few_uniques
def test_searchsorted(self, data_for_sorting, as_series): # noqa: F811
BaseMethodsTests.test_searchsorted(self, data_for_sorting, as_series)
@pytest.mark.parametrize("box", [pd.Series, lambda x: x])
@pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique])
@xfail_missing_list_dict_encode
def test_unique(self, data, box, method):
BaseMethodsTests.test_unique(self, data, box, method)
@xfail_missing_list_dict_encode
def test_factorize_empty(self, data):
BaseMethodsTests.test_factorize_empty(self, data)
def test_fillna_copy_frame(self, data_missing):
if pa.types.is_list(data_missing.dtype.arrow_dtype):
pytest.xfail("pandas' fillna cannot cope with lists as a scalar")
else:
BaseMethodsTests.test_fillna_copy_frame(self, data_missing)
def test_fillna_copy_series(self, data_missing):
if pa.types.is_list(data_missing.dtype.arrow_dtype):
pytest.xfail("pandas' fillna cannot cope with lists as a scalar")
else:
BaseMethodsTests.test_fillna_copy_series(self, data_missing)
@xfail_list_setitem_not_implemented
def test_combine_first(self, data):
BaseMethodsTests.test_combine_first(self, data)
@xfail_list_setitem_not_implemented
def test_shift_0_periods(self, data):
if PANDAS_GE_1_1_0:
BaseMethodsTests.test_shift_0_periods(self, data)
def test_shift_fill_value(self, data):
if pa.types.is_list(data.dtype.arrow_dtype):
pytest.xfail("pandas' isna cannot cope with lists")
else:
BaseMethodsTests.test_shift_fill_value(self, data)
def test_hash_pandas_object_works(self, data, as_frame): # noqa: F811
if pa.types.is_list(data.dtype.arrow_dtype):
pytest.xfail("Fails on hashing ndarrays")
else:
BaseMethodsTests.test_hash_pandas_object_works(self, data, as_frame)
@xfail_list_setitem_not_implemented
def test_where_series(self, data, na_value, as_frame): # noqa: F811
BaseMethodsTests.test_where_series(self, data, na_value, as_frame)
class TestBaseMissingTests(BaseMissingTests):
@pytest.mark.parametrize("method", ["ffill", "bfill"])
def test_fillna_series_method(self, data_missing, method):
BaseMissingTests.test_fillna_series_method(self, data_missing, method)
def test_fillna_frame(self, data_missing):
if pa.types.is_list(data_missing.dtype.arrow_dtype):
pytest.xfail("pandas' fillna cannot cope with lists as a scalar")
else:
BaseMissingTests.test_fillna_frame(self, data_missing)
def test_fillna_scalar(self, data_missing):
if pa.types.is_list(data_missing.dtype.arrow_dtype):
pytest.xfail("pandas' fillna cannot cope with lists as a scalar")
else:
BaseMissingTests.test_fillna_scalar(self, data_missing)
def test_fillna_series(self, data_missing):
if pa.types.is_list(data_missing.dtype.arrow_dtype):
pytest.xfail("pandas' fillna cannot cope with lists as a scalar")
else:
BaseMissingTests.test_fillna_series(self, data_missing)
class TestBaseReshapingTests(BaseReshapingTests):
def test_concat_mixed_dtypes(self, data, dtype):
arrow_dtype = data.dtype.arrow_dtype
if (
pa.types.is_integer(arrow_dtype)
or pa.types.is_floating(arrow_dtype)
or pa.types.is_boolean(arrow_dtype)
):
# https://github.com/pandas-dev/pandas/issues/21792
pytest.skip("pd.concat(int64, fletcher_chunked[int64] yields int64")
elif pa.types.is_temporal(arrow_dtype):
# https://github.com/pandas-dev/pandas/issues/33331
pytest.xfail("pd.concat(temporal, categorical) mangles dates")
else:
BaseReshapingTests.test_concat_mixed_dtypes(self, data)
def test_merge_on_extension_array(self, data):
if pa.types.is_list(data.dtype.arrow_dtype):
pytest.xfail("pandas tries to hash scalar lists")
else:
BaseReshapingTests.test_merge_on_extension_array(self, data)
def test_merge_on_extension_array_duplicates(self, data):
if pa.types.is_list(data.dtype.arrow_dtype):
pytest.xfail("pandas tries to hash scalar lists")
else:
BaseReshapingTests.test_merge_on_extension_array_duplicates(self, data)
@xfail_list_setitem_not_implemented
def test_ravel(self, data):
BaseReshapingTests.test_ravel(self, data)
class TestBaseSetitemTests(BaseSetitemTests):
@xfail_list_setitem_not_implemented
def test_setitem_scalar_series(self, data, box_in_series):
BaseSetitemTests.test_setitem_scalar_series(self, data, box_in_series)
@xfail_list_setitem_not_implemented
def test_setitem_sequence(self, data, box_in_series):
BaseSetitemTests.test_setitem_sequence(self, data, box_in_series)
@xfail_list_setitem_not_implemented
def test_setitem_empty_indxer(self, data, box_in_series):
BaseSetitemTests.test_setitem_empty_indxer(self, data, box_in_series)
@xfail_list_setitem_not_implemented
def test_setitem_sequence_broadcasts(self, data, box_in_series):
BaseSetitemTests.test_setitem_sequence_broadcasts(self, data, box_in_series)
@pytest.mark.parametrize("setter", ["loc", "iloc"])
@xfail_list_setitem_not_implemented
def test_setitem_scalar(self, data, setter):
BaseSetitemTests.test_setitem_scalar(self, data, setter)
@xfail_list_setitem_not_implemented
def test_setitem_loc_scalar_mixed(self, data):
| BaseSetitemTests.test_setitem_loc_scalar_mixed(self, data) | pandas.tests.extension.base.BaseSetitemTests.test_setitem_loc_scalar_mixed |
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalIndex,
Index,
Series,
)
import pandas._testing as tm
class TestMap:
@pytest.mark.parametrize(
"data, categories",
[
(list("abcbca"), list("cab")),
(pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)),
],
ids=["string", "interval"],
)
def test_map_str(self, data, categories, ordered):
# GH 31202 - override base class since we want to maintain categorical/ordered
index = CategoricalIndex(data, categories=categories, ordered=ordered)
result = index.map(str)
expected = CategoricalIndex(
map(str, data), categories=map(str, categories), ordered=ordered
)
tm.assert_index_equal(result, expected)
def test_map(self):
ci = CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True)
result = ci.map(lambda x: x.lower())
exp = CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True)
tm.assert_index_equal(result, exp)
ci = CategoricalIndex(
list("ABABC"), categories=list("BAC"), ordered=False, name="XXX"
)
result = ci.map(lambda x: x.lower())
exp = CategoricalIndex(
list("ababc"), categories=list("bac"), ordered=False, name="XXX"
)
tm.assert_index_equal(result, exp)
# GH 12766: Return an index not an array
tm.assert_index_equal(
ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX")
)
# change categories dtype
ci = CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False)
def f(x):
return {"A": 10, "B": 20, "C": 30}.get(x)
result = ci.map(f)
exp = CategoricalIndex(
[10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False
)
tm.assert_index_equal(result, exp)
result = ci.map(Series([10, 20, 30], index=["A", "B", "C"]))
tm.assert_index_equal(result, exp)
result = ci.map({"A": 10, "B": 20, "C": 30})
| tm.assert_index_equal(result, exp) | pandas._testing.assert_index_equal |
import os
import sys
import gpflow
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
from sklearn import svm
from sklearn import metrics
from fffit.utils import (
shuffle_and_split,
values_scaled_to_real,
values_real_to_scaled,
)
from fffit.models import run_gpflow_scipy
from fffit.pareto import find_pareto_set, is_pareto_efficient
sys.path.append("../")
from utils.ap import AP
from utils.prepare_samples import prepare_df
############################# QUANTITIES TO EDIT #############################
##############################################################################
iternum = 3
gp_shuffle_seed = 588654
clf_shuffle_seed = 19485
distance_seed = 15
##############################################################################
##############################################################################
temperatures = [10, 78, 298]
ucmd_clf_threshold = 0.8
ucmd_next_itr_threshold = 0.2
lattice_mape_next_itr_threshold = 1.5
symm_clf_threshold = 0.001
csv_path = "/scratch365/bbefort/ap-fffit/ap-fffit/analysis/csv/"
in_csv_names = [
"uc-lattice-iter" + str(i) + "-results.csv" for i in range(1, iternum+1)
]
out_csv_name = "uc-lattice-iter" + str(iternum+1) + "-params.csv"
# Read files
df_csvs = [
pd.read_csv(csv_path + in_csv_name, index_col=0)
for in_csv_name in in_csv_names
]
df_csv = | pd.concat(df_csvs) | pandas.concat |
# import libraries
import pandas as pd
from sqlalchemy import create_engine
import sys
def load_data(messages_filepath, categories_filepath):
""" load two datasets from each filepath
Input: two filepaths
Output: merged dataframe of both datasets
"""
# load both dfs
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
# merge dfs
df = pd.merge(messages, categories, how='inner')
return df
def clean_data(df):
""" cleans dataframe """
# split categories column into separate columns
categories = df['categories'].str.split(';', expand=True)
# select the first row of the categories dataframe
row = categories.iloc[0, :]
# use this row to extract a list of new column names for categories
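    # e.g. a cell like "related-1" becomes the column name "related" (the trailing "-0"/"-1" is dropped)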
category_colnames = row.apply(lambda x: x[:-2])
# rename the columns of `categories`
categories.columns = category_colnames
# convert dataframe to string type
    categories = categories.astype(str)
# set each value to be the last character of the string and convert it to numeric
for column in categories:
categories[column] = categories[column].apply(lambda x: | pd.to_numeric(x[-1]) | pandas.to_numeric |
# Import the needed libraries
import sys
import numpy as np
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
import pandas as pd
import tensorflow as tf
#print("hello")
train = pd.read_csv("../DataSet/trainingWithSolution.csv")
train = train.drop("solution", axis=1)
labels = train.columns.values
alarm = labels[-1]
# TRAIN DATASET
Xtrain = train.drop(labels[-1], axis=1)
# Encode target values into binary ('one-hot' style) representation
ytrain = | pd.get_dummies(train.iloc[:,-1]) | pandas.get_dummies |
# coding=utf-8
import numpy as np
from pandas import (DataFrame, date_range, Timestamp, Series,
to_datetime)
import pandas.util.testing as tm
from .common import TestData
class TestFrameAsof(TestData):
def setup_method(self, method):
self.N = N = 50
self.rng = date_range('1/1/1990', periods=N, freq='53s')
self.df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
index=self.rng)
def test_basic(self):
df = self.df.copy()
df.loc[15:30, 'A'] = np.nan
dates = date_range('1/1/1990', periods=self.N * 3,
freq='25s')
result = df.asof(dates)
assert result.notnull().all(1).all()
lb = df.index[14]
ub = df.index[30]
dates = list(dates)
result = df.asof(dates)
assert result.notnull().all(1).all()
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
assert (rs == 14).all(1).all()
def test_subset(self):
N = 10
rng = date_range('1/1/1990', periods=N, freq='53s')
df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
index=rng)
df.loc[4:8, 'A'] = np.nan
dates = date_range('1/1/1990', periods=N * 3,
freq='25s')
# with a subset of A should be the same
result = df.asof(dates, subset='A')
expected = df.asof(dates)
tm.assert_frame_equal(result, expected)
# same with A/B
result = df.asof(dates, subset=['A', 'B'])
expected = df.asof(dates)
tm.assert_frame_equal(result, expected)
# B gives self.df.asof
result = df.asof(dates, subset='B')
expected = df.resample('25s', closed='right').ffill().reindex(dates)
expected.iloc[20:] = 9
tm.assert_frame_equal(result, expected)
def test_missing(self):
# GH 15118
# no match found - `where` value before earliest date in index
N = 10
rng = date_range('1/1/1990', periods=N, freq='53s')
df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
index=rng)
result = df.asof('1989-12-31')
expected = Series(index=['A', 'B'], name=Timestamp('1989-12-31'))
tm.assert_series_equal(result, expected)
result = df.asof(to_datetime(['1989-12-31']))
expected = DataFrame(index=to_datetime(['1989-12-31']),
columns=['A', 'B'], dtype='float64')
tm.assert_frame_equal(result, expected)
def test_all_nans(self):
# GH 15713
# DataFrame is all nans
result = DataFrame([np.nan]).asof([0])
expected = DataFrame([np.nan])
tm.assert_frame_equal(result, expected)
# testing non-default indexes, multiple inputs
dates = date_range('1/1/1990', periods=self.N * 3, freq='25s')
result = DataFrame(np.nan, index=self.rng, columns=['A']).asof(dates)
expected = DataFrame(np.nan, index=dates, columns=['A'])
tm.assert_frame_equal(result, expected)
# testing multiple columns
dates = date_range('1/1/1990', periods=self.N * 3, freq='25s')
result = DataFrame(np.nan, index=self.rng,
columns=['A', 'B', 'C']).asof(dates)
expected = DataFrame(np.nan, index=dates, columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
# testing scalar input
result = DataFrame(np.nan, index=[1, 2], columns=['A', 'B']).asof([3])
expected = | DataFrame(np.nan, index=[3], columns=['A', 'B']) | pandas.DataFrame |
"""
Module for building a complete daily dataset from ivolatility's raw iv options data.
"""
from io import BytesIO
from zipfile import ZipFile
import numpy as np
import pandas as pd
import requests
from click import progressbar
from logbook import Logger
from six import iteritems
from six.moves.urllib.parse import urlencode
from zipline.data.bundles.core import most_recent_data
from . import core as bundles
log = Logger(__name__)
ONE_MEGABYTE = 1024 * 1024
# rut eod data 2006-07-28 to 2014-09-04
IVOLATILITY_DATA_URL = "https://www.dropbox.com/s/w59tcq8jc02w0vp/rut-eod-20060728-20140904.zip?"
# rut eod data 2000-11-01 to 2020-05-19
# IVOLATILITY_DATA_URL = "https://www.dropbox.com/s/494wx0vum1y0vx1/rut-eod.zip?"
# rut 1545 snapshot data 2003-09-18 to 2020-05-19
# IVOLATILITY_DATA_URL = "https://www.dropbox.com/s/e70501splbwsomt/rut-1545.zip?"
def format_metadata_url(api_key):
""" Build the query URL for Quandl WIKI Prices metadata.
"""
query_params = [("api_key", api_key), ("dl", 1)]
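    # "dl=1" asks Dropbox to serve the raw file instead of the preview page; the api_key
    # parameter is kept for interface compatibility and is ignored by Dropbox.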
return IVOLATILITY_DATA_URL + urlencode(query_params)
def load_data_table(file, index_col, show_progress=False):
""" Load data table from CSV file provided by ivolatility.
"""
with ZipFile(file) as zip_file:
data_tables = []
file_names = [x for x in zip_file.namelist() if not x.startswith("__")]
        assert len(file_names) >= 1, "Expected at least one file from iVolatility."
for data_file in file_names:
with zip_file.open(data_file) as table_file:
if show_progress:
log.info(f"Parsing raw data from {table_file.name}.")
data_table = pd.read_csv(
table_file,
parse_dates=["date", "option_expiration"],
index_col=index_col,
usecols=[
"date",
"symbol", # -> root_symbol
"exchange",
"company_name", # -> asset_name
"stock_price_close", # -> adjusted_underlying_close
"option_symbol",
"option_expiration", # -> expiration_date
"strike", # -> strike_price
"call_put", # -> option_type
"style",
# "open",
# "high",
# "low",
# "close",
"bid",
"ask",
"mean_price", # -> mid
# "settlement",
"iv", # -> implied_volatility
"volume",
"open_interest",
"stock_price_for_iv", # -> unadjusted_underlying_close
# "forward_price",
# "isinterpolated",
"delta",
"vega",
"gamma",
"theta",
"rho",
],
)
data_table.rename(
columns={
"symbol": "root_symbol",
"company_name": "asset_name",
"stock_price_close": "adjusted_underlying_close",
"option_symbol": "symbol",
"option_expiration": "expiration_date",
"strike": "strike_price",
"call_put": "option_type",
"mean_price": "mid",
"iv": "implied_volatility",
"stock_price_for_iv": "unadjusted_underlying_close",
},
inplace=True,
copy=False,
)
data_tables.append(data_table)
return pd.concat(data_tables)
def fetch_data_table(api_key, show_progress, retries):
""" Fetch price data table from ivolatility
"""
for _ in range(retries):
try:
if show_progress:
log.info("Downloading metadata.")
table_url = format_metadata_url(api_key)
if show_progress:
raw_file = download_with_progress(
table_url,
chunk_size=ONE_MEGABYTE,
label="Downloading option price table from ivolatility",
)
else:
raw_file = download_without_progress(table_url)
return load_data_table(
file=raw_file, index_col=None, show_progress=show_progress
)
except Exception:
log.exception("Exception raised reading ivolatility data. Retrying.")
else:
raise ValueError(
"Failed to download ivolatility data after %d attempts." % (retries)
)
def gen_root_symbols(data, show_progress):
if show_progress:
log.info("Generating asset root symbols.")
data = data.groupby(by="root_symbol").agg({"exchange": "first"})
data.reset_index(inplace=True)
return data
def gen_asset_metadata(data, show_progress):
if show_progress:
log.info("Generating asset metadata.")
data = data.groupby(by="occ_symbol").agg(
{
"symbol": "first",
"root_symbol": "first",
"asset_name": "first",
"date": [np.min, np.max],
"exchange": "first",
"expiration_date": "first",
"strike_price": "first",
"option_type": "first",
"style": "first",
}
)
data.reset_index(inplace=True)
data["start_date"] = data.date.amin
data["end_date"] = data.date.amax
del data["date"]
data.columns = data.columns.get_level_values(0)
data["asset_name"] = data.asset_name
data["tick_size"] = 0.01
data["multiplier"] = 100.0
data["first_traded"] = data["start_date"]
data["auto_close_date"] = data["expiration_date"].values + pd.Timedelta(days=1)
return data
def _gen_symbols(data, show_progress):
if show_progress:
log.info("Generating OCC symbols.")
data["symbol"] = [x.replace(" ", "") for x in data.symbol.values]
root_symbol_fmt = [
"{:6}".format(x.upper()).replace(" ", "") for x in data.root_symbol.values
]
expiration_fmt = [
pd.Timestamp(x).strftime("%y%m%d")
for x in data.expiration_date.values.astype("datetime64[D]")
]
option_type_fmt = [x.upper() for x in data.option_type.values]
strike_fmt = [
"{:09.3f}".format(float(x)).replace(".", "") for x in data.strike_price.values
]
occ_format = lambda x: f"{x[0]}{x[1]}{x[2]}{x[3]}"
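    # e.g. ("RUT", "140920", "C", "01100000") -> "RUT140920C01100000"
    # (OCC style: root, yymmdd expiry, C/P, strike x 1000 zero-padded)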
mapped = map(
occ_format, zip(root_symbol_fmt, expiration_fmt, option_type_fmt, strike_fmt)
)
data["occ_symbol"] = list(mapped)
return data
def _get_price_metadata(data, show_progress):
if show_progress:
log.info("Generating mid, spread, moneyness and days to expiration.")
data["interest_rate"] = np.nan
data["statistical_volatility"] = np.nan
data["option_value"] = np.nan
bid = data.bid.values
ask = data.ask.values
bid[np.isnan(bid)] = 0.0
ask[np.isnan(ask)] = 0.0
data["spread"] = ask - bid
# create the close price because it's used everywhere
data["close"] = data.mid
data["moneyness"] = np.nan
calls = data[data.option_type == "C"]
puts = data[data.option_type == "P"]
data.loc[data.option_type == "C", "moneyness"] = (
calls.strike_price.values / calls.unadjusted_underlying_close.values
)
data.loc[data.option_type == "P", "moneyness"] = (
puts.unadjusted_underlying_close.values / puts.strike_price.values
)
data["days_to_expiration"] = (
data.expiration_date.values - data.date.values
).astype("timedelta64[D]")
return data
def gen_valuation_metadata(data, show_progress):
data = _gen_symbols(data, show_progress)
data = _get_price_metadata(data, show_progress)
return data
def parse_pricing_and_vol(data, sessions, symbol_map):
for asset_id, occ_symbol in iteritems(symbol_map):
asset_data = (
data.xs(occ_symbol, level=1).reindex(sessions.tz_localize(None)).fillna(0.0)
)
yield asset_id, asset_data
@bundles.register(
"ivolatility-raw-iv",
# start and end sessions of the dtr trading examples
start_session=pd.Timestamp("2006-07-28", tz="UTC"),
end_session= | pd.Timestamp("2014-09-04", tz="UTC") | pandas.Timestamp |
import os
import typing
import hdpc
import numpy as np
import pandas as pd
from d3m import container, utils
from d3m.metadata import hyperparams, base as metadata_base
from d3m.metadata import params
from d3m.primitive_interfaces import base
from d3m.primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
from sklearn.feature_extraction.text import CountVectorizer
from fastlvm.utils import get_documents, mk_text_features, tokenize, split_inputs
Inputs = container.DataFrame
Outputs = container.DataFrame
Predicts = container.ndarray # type: np.ndarray
class Params(params.Params):
topic_matrix: bytes # Byte stream represening topics
vectorizer: typing.Any
analyze: typing.Any
class HyperParams(hyperparams.Hyperparams):
k = hyperparams.UniformInt(lower=1, upper=10000, default=10,
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
description='The number of clusters to form as well as the number of centroids to '
'generate.')
iters = hyperparams.UniformInt(lower=1, upper=10000, default=100,
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
description='The number of iterations of inference.')
num_top = hyperparams.UniformInt(lower=1, upper=10000, default=15,
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
description='The number of top words requested')
frac = hyperparams.Uniform(lower=0, upper=1, default=0.01, upper_inclusive=False,
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
description='The fraction of training data set aside as the validation. 0 = use all '
'training as validation')
class HDP(UnsupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, HyperParams]):
"""
This class provides functionality for Hierarchical Dirichlet Process, which is a nonparametric Bayesian model for
topic modelling on corpora of documents which seeks to represent the underlying thematic structure of the
document collection. They have emerged as a powerful new technique of finding useful structure in an unstructured
collection as it learns distributions over words. The high probability words in each distribution gives us a way
of understanding the contents of the corpus at a very high level. In HDP, each document of the corpus is assumed
to have a distribution over K topics, where the discrete topic distributions are drawn from a symmetric dirichlet
distribution. As it is a nonparametric model, the number of topics K is inferred automatically. The API is
similar to its parametric equivalent sklearn.decomposition.LatentDirichletAllocation. The class is pickle-able.
"""
__author__ = 'CMU'
__metadata__ = {
"common_name": "Hierarchical Dirichlet Process Topic Modelling",
"algorithm_type": ["Bayesian", "Clustering", "Probabilistic Graphical Models"],
"handles_classification": False,
"handles_regression": False,
"handles_multiclass": False,
"handles_multilabel": False,
"input_type": ["DENSE"],
"output_type": ["PREDICTIONS"],
"schema_version": 1.0,
"compute_resources": {
"sample_size": [],
"sample_unit": [],
"disk_per_node": [],
"expected_running_time": [],
"gpus_per_node": [],
"cores_per_node": [],
"mem_per_gpu": [],
"mem_per_node": [],
"num_nodes": [],
},
}
metadata = metadata_base.PrimitiveMetadata({
"id": "e582e738-2f7d-4b5d-964f-022d15f19018",
"version": "3.1.1",
"name": "Hierarchical Dirichlet Process Topic Modelling",
"description": "This class provides functionality for Hierarchical Dirichlet Process, which is a "
"nonparametric Bayesian model for topic modelling on corpora of documents which seeks to "
"represent the underlying thematic structure of the document collection. They have emerged as "
"a powerful new technique of finding useful structure in an unstructured collection as it "
"learns distributions over words. The high probability words in each distribution gives us a "
"way of understanding the contents of the corpus at a very high level. In HDP, each document "
"of the corpus is assumed to have a distribution over K topics, where the discrete topic "
"distributions are drawn from a symmetric dirichlet distribution. As it is a nonparametric "
"model, the number of topics K is inferred automatically. The API is similar to its parametric "
"equivalent sklearn.decomposition.LatentDirichletAllocation. The class is pickle-able.",
"python_path": "d3m.primitives.natural_language_processing.hdp.Fastlvm",
"primitive_family": metadata_base.PrimitiveFamily.NATURAL_LANGUAGE_PROCESSING,
"algorithm_types": ["LATENT_DIRICHLET_ALLOCATION"],
"keywords": ["large scale HDP", "Bayesian Nonparametrics", "topic modeling", "clustering"],
"source": {
"name": "CMU",
"contact": "mailto:<EMAIL>",
"uris": ["https://gitlab.datadrivendiscovery.org/cmu/fastlvm", "https://github.com/autonlab/fastlvm"]
},
"installation": [
{
"type": "PIP",
"package_uri": 'git+https://github.com/autonlab/fastlvm.git@{git_commit}#egg=fastlvm'.format(
git_commit=utils.current_git_commit(os.path.dirname(__file__)))
}
]
})
def __init__(self, *, hyperparams: HyperParams, random_seed: int = 0) -> None:
# super(HDP, self).__init__()
super().__init__(hyperparams=hyperparams, random_seed=random_seed)
self._this = None
self._k = hyperparams['k']
self._iters = hyperparams['iters']
self._num_top = hyperparams['num_top']
self._frac = hyperparams['frac'] # the fraction of training data set aside as the validation
self._training_inputs = None # type: Inputs
self._fitted = False
self._ext = None
self._vectorizer = None # for tokenization
self._analyze = None # to tokenize raw documents
self.hyperparams = hyperparams
def __del__(self):
if self._this is not None:
hdpc.delete(self._this, self._ext)
def set_training_data(self, *, inputs: Inputs) -> None:
"""
Sets training data for HDP.
Parameters
----------
inputs : Inputs
            A DataFrame with at least one text column; the raw documents are extracted, tokenized and mapped to word ids inside fit().
"""
self._training_inputs = inputs
self._fitted = False
def fit(self, *, timeout: float = None, iterations: int = None) -> base.CallResult[None]:
"""
Inference on the hierarchical Dirichlet process model
"""
if self._fitted:
return
if self._training_inputs is None:
raise ValueError("Missing training data.")
# Create documents from the data-frame
raw_documents = get_documents(self._training_inputs)
if raw_documents is None: # training data contains no text fields
self._fitted = True
if self._this is not None:
hdpc.delete(self._this, self._ext)
self._this = None
return base.CallResult(None)
# Extract the vocabulary from the inputs data-frame
self._vectorizer = CountVectorizer()
self._vectorizer.fit(raw_documents)
vocab_size = len(self._vectorizer.vocabulary_)
# Build analyzer that handles tokenization
self._analyze = self._vectorizer.build_analyzer()
vocab = ['w' + str(i) for i in range(vocab_size)]
if self.random_seed is None: # seed is not set
self._this = hdpc.new(self._k, self._iters, vocab, self._num_top, 0, self.random_seed)
else: # use the given seed
self._this = hdpc.new(self._k, self._iters, vocab, self._num_top, 1, self.random_seed)
# Tokenize documents
tokenized = tokenize(raw_documents, self._vectorizer.vocabulary_, self._analyze)
# Uniformly split the data to training and validation
training, validation = split_inputs(tokenized, self._frac, self.random_seed)
hdpc.fit(self._this, training.tolist(), validation.tolist())
self._fitted = True
return base.CallResult(None)
def get_call_metadata(self) -> bool:
"""
Returns metadata about the last ``fit`` call if it succeeded
Returns
-------
Status : bool
True/false status of fitting.
"""
return self._fitted
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> base.CallResult[Outputs]:
"""
Finds the token topic assignment (and consequently topic-per-document distribution) for the given set of docs using the learned model.
Parameters
----------
inputs : Inputs
            A DataFrame with at least one text column; documents are tokenized with the vocabulary learned during fit().
Returns
-------
Outputs
            A DataFrame containing the non-text input columns plus per-topic text features derived from the token-topic assignments.
"""
if self._this is None:
return base.CallResult(inputs)
raw_documents, non_text_features = get_documents(inputs, non_text=True)
tokenized = tokenize(raw_documents, self._vectorizer.vocabulary_, self._analyze)
predicted = hdpc.predict(self._this,
tokenized.tolist()) # per word topic assignment # TODO investigate why some index is bigger than self._k
text_features = mk_text_features(predicted, self._k)
# concatenate the features row-wise
features = | pd.concat([non_text_features, text_features], axis=1) | pandas.concat |
'''
Author: <NAME>
File: composite_frame
Trello: Goal 1
'''
from typing import List
import numpy as np
import pandas as pd
class Composite_Frame(object):
'''
    The Composite_Frame class takes a pandas data frame containing network flow
    information and splits it into a list of frames, each representing the telemetry
    of the network at a given time interval.
Dataset used: BoT IoT Dataset (10 best features CSV)
'''
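    # Usage sketch (file name is illustrative; the frame needs saddr, stime, ltime and TnBPSrcIP columns):
    #   df = pd.read_csv("botiot_10_best_features.csv")
    #   windows = Composite_Frame(df, interval=60).items   # one DataFrame per 60-time-unit window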
def __init__(self, frame: pd.DataFrame, interval: int, max_frames: int = -1):
'''
Instance variables:
@self.items -> The list of dataframes derived from the parent frame
@self._interval -> The time interval to split the parent frame on
@self.max_frames -> The maximum number of time frames to include in
the composite frame. Default is set to 10,000
'''
if max_frames < 0:
self.max_frames = 10000
else:
self.max_frames = max_frames
self.items: List[pd.DataFrame] = self._split_frame(frame, interval)
self._interval = interval
def _insert_row(self, row_number: int, df: pd.DataFrame, row_value):
'''
Borrowed this function from the following Geeks for Geeks
article: https://www.geeksforgeeks.org/insert-row-at-given-position-in-pandas-dataframe/
'''
# Split old dataframe
df1 = df[0:row_number]
df2 = df[row_number:]
# Add new row to first subframe
df1.loc[row_number] = row_value
# Create new dataframe from two subframes
df_result = pd.concat([df1, df2])
df_result.index = [*range(df_result.shape[0])]
return df_result
def _split_flow(self, df: pd.DataFrame, index: int, interval: int, min_stime: int, max_ltime: int):
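        # A flow that starts inside the current window but ends after it is split in two:
        # the original row is truncated at max_ltime and its byte count (TnBPSrcIP) scaled by
        # the fraction of the flow inside the window; the inserted copy carries the remainder.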
# Insert a copy of the new row at the appropriate place in the df
row = df.iloc[index]
try: # Attempt to acquire insert index
insert_index = df[df.stime > max_ltime].index[0]
except: # Attempt failed, insert index should be end of dataframe
insert_index = -1
if insert_index >= 0:
df = self._insert_row(int(insert_index), df, row)
else:
insert_index = df.shape[0]
df.loc[insert_index] = row
# Calculate percent of the flow which is in the current window
percent_in_frame = (max_ltime-row.stime)/(row.ltime-row.stime)
# Adjust values for the original flow
df.at[index, 'ltime'] = max_ltime
df.at[index, 'TnBPSrcIP'] = row.TnBPSrcIP*percent_in_frame
# Adjust values for the new flowl
df.at[insert_index, 'stime'] = max_ltime
df.at[insert_index, 'TnBPSrcIP'] = row.TnBPSrcIP * \
(1.-percent_in_frame)
return df
def _process_flow(self, current_stime, min_stime, current_ltime, max_ltime, current_frame, traffic, index, interval):
if current_stime >= min_stime and current_ltime < max_ltime:
current_frame.append(traffic.iloc[index])
elif current_stime >= min_stime and current_stime < max_ltime and \
current_ltime >= max_ltime:
traffic = self._split_flow(
traffic, index, interval, min_stime, max_ltime)
current_frame.append(traffic.iloc[index])
return traffic, current_frame
def _split_frame(self, df: pd.DataFrame, interval: int):
# Order the data frame by start time
traffic = df.sort_values(by=['stime']).reset_index()
traffic = traffic.filter(['saddr', 'stime', 'ltime', 'TnBPSrcIP'])
# Variables for tracking the progress of the function
progress = 0.
# Loop Variables
index = 0
time_frames = [] # list to hold subframes
current_frame = [] # Current data frame being populated
# Starting point of current time frame
min_stime = traffic.iloc[0].stime
max_ltime = min_stime + interval # ending point of current time frame
# Main loop
while not traffic.empty:
# Find start and end time of current flow
current_stime, current_ltime = traffic.iloc[index].stime, traffic.iloc[index].ltime
if current_stime < max_ltime:
traffic, current_frame = self._process_flow(current_stime, min_stime, current_ltime,
max_ltime, current_frame, traffic, index, interval)
else:
# Update loop variables
current_frame = | pd.DataFrame(current_frame) | pandas.DataFrame |
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, Series
import pandas._testing as tm
class TestDataFrameIndexingCategorical:
def test_assignment(self):
# assignment
df = DataFrame(
{"value": np.array(np.random.randint(0, 10000, 100), dtype="int32")}
)
labels = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
df = df.sort_values(by=["value"], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df["D"] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype("int32"), CategoricalDtype(categories=labels, ordered=False)],
index=["value", "D"],
)
tm.assert_series_equal(result, expected)
df["E"] = s
str(df)
result = df.dtypes
expected = Series(
[
np.dtype("int32"),
CategoricalDtype(categories=labels, ordered=False),
CategoricalDtype(categories=labels, ordered=False),
],
index=["value", "D", "E"],
)
tm.assert_series_equal(result, expected)
result1 = df["D"]
result2 = df["E"]
tm.assert_categorical_equal(result1._mgr._block.values, d)
# sorting
s.name = "E"
tm.assert_series_equal(result2.sort_index(), s.sort_index())
cat = Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = DataFrame(Series(cat))
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = Categorical(["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = DataFrame({"cats": cats1, "values": values1}, index=idx1)
# changed multiple rows
cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = DataFrame({"cats": cats2, "values": values2}, index=idx2)
# changed part of the cats column
cats3 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = DataFrame({"cats": cats3, "values": values3}, index=idx3)
# changed single value in cats col
cats4 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = DataFrame(
{"cats": cats4, "values": values4}, index=idx4
)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
msg1 = (
"Cannot setitem on a Categorical with a new category, "
"set the categories first"
)
msg2 = "Cannot set a Categorical with another, without identical categories"
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.iloc[2, 0] = "c"
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.iloc[2, :] = ["c", 2]
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg2):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = Categorical(list("bb"), categories=list("abc"))
with pytest.raises(ValueError, match=msg2):
# different values
df = orig.copy()
df.iloc[2:4, 0] = Categorical(list("cc"), categories=list("abc"))
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg1):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j", "cats"] = "c"
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j", :] = ["c", 2]
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg2):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(
["b", "b"], categories=["a", "b", "c"]
)
with pytest.raises(ValueError, match=msg2):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(
["c", "c"], categories=["a", "b", "c"]
)
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg1):
df.loc["j":"k", "cats"] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", df.columns[0]] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", df.columns[0]] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j", df.columns[0]] = "c"
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j", :] = ["c", 2]
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", df.columns[0]] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg2):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", df.columns[0]] = Categorical(
["b", "b"], categories=["a", "b", "c"]
)
with pytest.raises(ValueError, match=msg2):
# different values
df = orig.copy()
df.loc["j":"k", df.columns[0]] = Categorical(
["c", "c"], categories=["a", "b", "c"]
)
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", df.columns[0]] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError, match=msg1):
df.loc["j":"k", df.columns[0]] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.iat[2, 0] = "c"
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.at["j", "cats"] = "c"
# fancy indexing
catsf = Categorical(
["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"]
)
idxf = Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
# category c is kept in .categories
tm.assert_frame_equal(df, exp_fancy)
# set_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
with pytest.raises(ValueError, match=msg1):
df = orig.copy()
df.at["j", "cats"] = "c"
# Assigning a Category to parts of a int/... column uses the values of
# the Categorical
df = DataFrame({"a": [1, 1, 1, 1, 1], "b": list("aaaaa")})
exp = DataFrame({"a": [1, "b", "b", 1, 1], "b": list("aabba")})
df.loc[1:2, "a"] = Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = | Categorical(["b", "b"], categories=["a", "b"]) | pandas.Categorical |
import pandas as pd
#API import
from matlabs.owlracer import core_pb2
from grpcClient import core_pb2_grpc
from serviceClient import service, commands
def mainLoop():
env = service.Env(spectator=True)
carID = env.getCarIDs()
carID = core_pb2.GuidData(guidString = str(carID.guids[0]).split("\"")[1])
#print("On the server is {} car with IDs".format())
env.setCarID(carID=carID)
columList = ["MapNr","pX","pY","rotation","maxVelocity","acceleration","velocity","isCrashed","isDone","scoreStep","scoreOverall","ticks","dFront","dFrontL","dFrontR","dLeft","dRight","checkPoint"]
#loop variables
sampleNr=0
mapNr = 0
dataList = []
lastTick = -1
while(sampleNr<10):
step_result: core_pb2_grpc.RaceCarData = env.getCarData()
if step_result.ticks > lastTick:
dataList.append([mapNr,step_result.position.x, step_result.position.y, step_result.rotation, step_result.maxVelocity, step_result.acceleration, step_result.velocity, step_result.isCrashed, step_result.isDone, step_result.scoreStep, step_result.scoreOverall, step_result.ticks,
step_result.distance.front, step_result.distance.frontLeft, step_result.distance.frontRight, step_result.distance.left, step_result.distance.right, step_result.checkPoint])
#print("Car Pos: {} {}, Vel: {} forward distance {}".format(step_result.position.x, step_result.position.y, step_result.velocity, step_result.distance.front))
sampleNr += 1
lastTick = step_result.ticks
dataFrame = | pd.DataFrame(dataList, columns=columList) | pandas.DataFrame |
from datetime import datetime, time, date
from functools import partial
from dateutil import relativedelta
import calendar
from pandas import DateOffset, datetools, DataFrame, Series, TimeSeries, Panel
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.resample import _get_range_edges
from pandas.core.groupby import DataFrameGroupBy, PanelGroupBy, BinGrouper
from pandas.tseries.resample import TimeGrouper
from pandas.tseries.offsets import Tick
from pandas.tseries.frequencies import _offset_map, to_offset
import pandas.lib as lib
import numpy as np
from trtools.monkey import patch, patch_prop
def _is_tick(offset):
return isinstance(offset, Tick)
## TODO See if I still need this. All this stuff was pre resample
def first_day(year, month, bday=True):
"""
Return first day of month. Default to business days
"""
weekday, days_in_month = calendar.monthrange(year, month)
if not bday:
return 1
if weekday <= 4:
return 1
else:
return 7-weekday+1
class MonthStart(DateOffset):
"""
Really the point of this is for DateRange, creating
a range where the month is anchored on day=1 and not the end
"""
def apply(self, other):
first = first_day(other.year, other.month)
if other.day == first:
result = other + relativedelta.relativedelta(months=1)
result = result.replace(day=first_day(result.year, result.month))
else:
result = other.replace(day=first)
return datetime(result.year, result.month, result.day)
def onOffset(self, someDate):
return someDate.day == first_day(someDate.year, someDate.month)
def daily_group(df):
daterange_func = partial(DatetimeIndex, freq=datetools.day)
return down_sample(df, daterange_func)
def weekly_group(df):
daterange_func = partial(DatetimeIndex, freq="W@MON")
return down_sample(df, daterange_func)
def monthly_group(df):
daterange_func = partial(DatetimeIndex, freq=MonthStart())
return down_sample(df, daterange_func)
def down_sample(obj, daterange_func):
if isinstance(obj, Panel):
index = obj.major_axis
else:
index = obj.index
start = datetime.combine(index[0].date(), time(0))
end = datetime.combine(index[-1].date(), time(0))
range = daterange_func(start=start, end=end)
grouped = obj.groupby(range.asof)
grouped._range = range
return grouped
# END TODO
def cols(self, *args):
return self.xs(list(args), axis=1)
def dropna_get(x, pos):
try:
return x.dropna().iget(pos)
except:
return None
def aggregate_picker(grouped, grouped_indices, col=None):
"""
In [276]: g.agg(np.argmax).high
Out[276]:
key_0
2007-04-27 281
2007-04-30 0
2007-05-01 5
2007-05-02 294
2007-05-03 3
2007-05-04 53
Should take something in that form and return a DataFrame with the proper date indexes and values...
"""
index = []
values = []
for key, group in grouped:
if col:
group = group[col]
sub_index = grouped_indices[key]
index.append(group.index[sub_index])
values.append(group.iget_value(sub_index))
return {'index':index, 'values':values}
# old version
def _kv_agg(grouped, func, col=None):
"""
Works like agg but returns index label and value for each hit
"""
if col:
sub_indices = grouped.agg({col: func})[col]
else:
sub_indices = grouped.agg(func)
data = aggregate_picker(grouped, sub_indices, col=col)
return TimeSeries(data['values'], index=data['index'])
def kv_agg(grouped, func, col=None):
"""
Simpler version that is a bit faster. Really, I don't use aggregate_picker,
which makes it slightly faster.
"""
index = []
values = []
for key, group in grouped:
if col:
group = group[col]
sub_index = func(group)
val = group.iget_value(sub_index)
values.append(val)
index.append(group.index[sub_index])
return TimeSeries(values, index=index)
def set_time(arr, hour, minute):
"""
Given a list of datetimes, set the time on all of them
"""
results = []
t = time(hour, minute)
for date in arr:
d = datetime.combine(date.date(), t)
results.append(d)
return results
def reset_time(df, hour, minute):
if isinstance(df, (DataFrame, Series)):
df.index = set_time(df.index, hour, minute)
if isinstance(df, Panel):
df.major_axis = set_time(df.major_axis, hour, minute)
return df
def max_groupby(grouped, col=None):
df = kv_agg(grouped, np.argmax, col)
return df
def trading_hours(df):
# assuming timestamp marks end of bar
inds = df.index.indexer_between_time(time(9,30),
time(16), include_start=False)
return df.take(inds)
times = np.vectorize(lambda x: x.time())
hours = np.vectorize(lambda x: x.time().hour)
minutes = np.vectorize(lambda x: x.time().minute)
def time_slice(series, hour=None, minute=None):
"""
    Vectorize a function that returns a boolean array marking entries whose time
    matches the given hour and/or minute.
"""
bh = hour is not None
bm = minute is not None
if bh and bm:
t = time(hour, minute)
vec = np.vectorize(lambda x: x.time() == t)
if bh and not bm:
vec = np.vectorize(lambda x: x.time().hour == hour)
if not bh and bm:
vec = np.vectorize(lambda x: x.time().minute == minute)
return vec(series.index)
def end_asof(index, label):
"""
    Like index.asof, but places the timestamp at the end of the bar.
"""
if label not in index:
loc = index.searchsorted(label, side='left')
if loc > 0:
return index[loc]
else:
return np.nan
return label
# TODO Forget where I was using this. I think pandas does this now.
class TimeIndex(object):
"""
Kind of like a DatetimeIndex, except it only cares about the time component of a Datetime object.
"""
def __init__(self, times):
self.times = times
def asof(self, date):
"""
Follows price is right rules. Will return the closest time that is equal or below.
If time is after the last date, it will just return the date.
"""
testtime = date.time()
last = None
for time in self.times:
if testtime == time:
return date
if testtime < time:
# found spot
break
last = time
# TODO should I anchor this to the last time?
if last is None:
return date
new_date = datetime.combine(date.date(), last)
return new_date
def get_time_index(freq, start=None, end=None):
if start is None:
start = "1/1/2012 9:30AM"
if end is None:
end = "1/1/2012 4:00PM"
ideal = DatetimeIndex(start=start, end=end, freq=freq)
times = [date.time() for date in ideal]
return TimeIndex(times)
def get_anchor_index(index, freq):
ideal = get_time_index(freq)
start = index[0]
start = ideal.asof(start)
end = index[-1]
start, end = _get_range_edges(index, offset=freq, closed='right')
ind = DatetimeIndex(start=start, end=end, freq=freq)
return ind
def anchor_downsample(obj, freq, axis=None):
"""
Point of this is to fix the freq to regular intervals like 9:30, 9:45, 10:00
    and not 9:13, 9:28, 9:43.
"""
if axis is None:
axis = 0
if isinstance(obj, Panel):
axis = 1
index = obj._get_axis(axis)
ind = get_anchor_index(index, freq)
bins = lib.generate_bins_dt64(index.asi8, ind.asi8, closed='right')
labels = ind[1:]
grouper = | BinGrouper(bins, labels) | pandas.core.groupby.BinGrouper |
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
from pycox.datasets._dataset_loader import _DatasetLoader, _PATH_DATA
class _DatasetKKBoxChurn(_DatasetLoader):
"""KKBox churn data set obtained from Kaggle (WSDM - KKBox's Churn Prediction Challenge 2017).
https://www.kaggle.com/c/kkbox-churn-prediction-challenge/data
This is the version of the data set presented by Kvamme et al. (2019) [1], but the preferred version
is the `kkbox` version which included administrative censoring labels and and extra categorical variable.
Requires installation of the Kaggle API (https://github.com/Kaggle/kaggle-api),
with credentials (https://github.com/Kaggle/kaggle-api).
The data set contains churn information from KKBox, an Asian music streaming service. Churn is
defined by a customer failing to obtain a new valid service subscription within 30 days after
the current membership expires.
This version of the data set only consider part of the information made available in the challenge,
as it is intended to compare survival methods, and not compete in the challenge.
The data set is split in train, test and validations, based on an individual's id ('msno').
Variables:
msno:
Identifier for individual. An individual might churn multiple times.
event:
Churn indicator, 1: churn, 0: censoring.
n_prev_churns:
Number of previous churns by the individual.
(log_)days_between_subs:
Number of days between this and the last subscription (log-transformed), if previously
churned.
duration:
Durations until churn or censoring.
(log_)days_since_reg_init:
Number of days since first registration (log-transformed).
(log_)payment_plan_days:
Number of days until current subscription expires (log-transformed).
(log_)plan_list_price:
Listed price of current subscription (log-transformed).
(log_)actual_amount_paid:
The amount payed for the subscription (log-transformed).
is_auto_renew:
Not explained in competition https://www.kaggle.com/c/kkbox-churn-prediction-challenge/data
is_cancel:
If the customer has canceled the subscription. Subscription cancellation does not imply the
user has churned. A user may cancel service subscription due to change of service plans or
other reasons.
city:
City of customer.
gender:
Gender of customer.
registered_via:
Registration method.
age_at_start:
Age at beginning of subscription.
strange_age:
Indicator for strange ages.
nan_days_since_reg_init:
Indicator that we don't know when the customer first subscribed.
no_prev_churns:
Indicator if the individual has not previously churned.
References:
[1] <NAME>, <NAME>, and <NAME>. Time-to-event prediction with neural networks
and Cox regression. Journal of Machine Learning Research, 20(129):1–30, 2019.
http://jmlr.org/papers/v20/18-424.html
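    Examples:
        A minimal usage sketch (it assumes the data has already been downloaded via
        `download_kkbox()` and that the module-level instance is exposed as
        `pycox.datasets.kkbox_v1`):
            from pycox import datasets
            df_train = datasets.kkbox_v1.read_df('train')      # covariates + 'duration'/'event'
            df_surv = datasets.kkbox_v1.read_df('survival')    # survival info only, no covariates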
"""
name = 'kkbox_v1'
_checksum = '705ca57c7efd2d916f0da2dd6e3a399b3b279773271d595c2f591fcf7bb7cae6'
def __init__(self):
self._path_dir = _PATH_DATA / self.name
self.path_train = self._path_dir / 'train.feather'
self.path_test = self._path_dir / 'test.feather'
self.path_val = self._path_dir / 'val.feather'
self.path_survival = self._path_dir / 'survival_data.feather'
self.log_cols = ['actual_amount_paid', 'days_between_subs', 'days_since_reg_init',
'payment_plan_days', 'plan_list_price']
def read_df(self, subset='train', log_trans=True):
"""Get train, test, val or general survival data set.
        The columns 'duration' and 'event' give the duration time and the event indicator.
The survival data set contains no covariates, but can be useful for extending
the dataset with more covariates from Kaggle.
Keyword Arguments:
subset {str} -- Which subset to use ('train', 'val', 'test').
Can also set 'survival' which will give df with survival information without
covariates. (default: {'train'})
log_trans {bool} -- If covariates in 'kkbox_v1.log_cols' (from Kvamme paper) should be
transformed with 'z = log(x - min(x) + 1)'. (default: {True})
"""
if subset == 'train':
path = self.path_train
elif subset == 'test':
path = self.path_test
elif subset == 'val':
path = self.path_val
elif subset == 'survival':
path = self.path_survival
return pd.read_feather(path)
else:
raise ValueError(f"Need 'subset' to be 'train', 'val', or 'test'. Got {subset}")
if not path.exists():
print(f"""
The KKBox dataset not locally available.
If you want to download, call 'kkbox_v1.download_kkbox()', but note that
this might take around 10 min!
NOTE: You need Kaggle credentials! Follow instructions at
https://github.com/Kaggle/kaggle-api#api-credentials
""")
return None
def log_min_p(col, df):
x = df[col]
min_ = -1. if col == 'days_since_reg_init' else 0.
return np.log(x - min_ + 1)
df = pd.read_feather(path)
if log_trans:
df = df.assign(**{col: log_min_p(col, df) for col in self.log_cols})
df = df.rename(columns={col: f"log_{col}" for col in self.log_cols})
return df
def download_kkbox(self):
"""Download KKBox data set.
This is likely to take around 10 min!!!
NOTE: You need Kaggle credentials! Follow instructions at
https://github.com/Kaggle/kaggle-api#api-credentials
"""
self._download()
def _download(self):
self._setup_download_dir()
self._7z_from_kaggle()
self._csv_to_feather_with_types()
print('Creating survival data...')
self._make_survival_data()
print('Creating covariates...')
self._make_survival_covariates()
print('Creating train/test/val subsets...')
self._make_train_test_split()
print('Cleaning up...')
self._clean_up()
print("Done! You can now call `df = kkbox.read_df()`.")
def _setup_download_dir(self):
if self._path_dir.exists():
self._clean_up()
else:
self._path_dir.mkdir()
def _7z_from_kaggle(self):
import subprocess
import py7zr
try:
import kaggle
except OSError as e:
raise OSError(
f""""
Need to provide Kaggle credentials to download this data set. See guide at
https://github.com/Kaggle/kaggle-api#api-credentials.
"""
)
files = ['train', 'transactions', 'members_v3']
print('Downloading from Kaggle...')
for file in files:
kaggle.api.competition_download_file('kkbox-churn-prediction-challenge', file + '.csv.7z',
path=self._path_dir, force=True)
for file in files:
print(f"Extracting '{file}'...")
archive = py7zr.SevenZipFile(self._path_dir / f"{file}.csv.7z", mode="r")
archive.extractall(path=self._path_dir)
archive.close()
print(f"Finished extracting '{file}'.")
def _csv_to_feather_with_types(self):
print("Making feather data frames...")
file = 'train'
pd.read_csv(self._path_dir / f"{file}.csv").to_feather(self._path_dir / f"{file}_raw.feather")
file = 'members'
members = pd.read_csv(self._path_dir / f"{file}_v3.csv",
parse_dates=['registration_init_time'])
(members.assign(**{col: members[col].astype('category')
for col in ['city', 'registered_via', 'gender']})
.to_feather(self._path_dir / f"{file}.feather"))
file = 'transactions'
trans = pd.read_csv(self._path_dir / f"{file}.csv", parse_dates=['transaction_date', 'membership_expire_date'])
(trans.assign(**{col: trans[col].astype('category') for col in ['payment_method_id', 'is_auto_renew', 'is_cancel']})
.to_feather(self._path_dir / f"{file}.feather"))
def _make_survival_data(self):
"""Combine the downloaded files and create a survival data sets
(more or less without covariates).
A customer is considered churned if one of the following is true:
        - If it has been more than 30 days since the expiration date of a membership subscription until the next transaction.
        - If the customer's subscription expires before 2017-03-01 and there is no transaction after that date.
"""
train = pd.read_feather(self._path_dir / 'train_raw.feather')
members = pd.read_feather(self._path_dir / 'members.feather')
trans = (pd.read_feather(self._path_dir / 'transactions.feather')
[['msno', 'transaction_date', 'membership_expire_date', 'is_cancel']])
last_churn_date = '2017-01-29' # 30 days before last transactions are made in the dataset.
# Churn: More than 30 days before reentering
def days_without_membership(df):
diff = (df['next_trans_date'] - df['membership_expire_date']).dt.total_seconds()
return diff / (60 * 60 * 24)
trans = (trans
.sort_values(['msno', 'transaction_date'])
.assign(next_trans_date=(lambda x: x.groupby('msno')['transaction_date'].shift(-1)))
.assign(churn30=lambda x: days_without_membership(x) > 30))
# Remove entries with membership_expire_date < transaction_date
trans = trans.loc[lambda x: x['transaction_date'] <= x['membership_expire_date']]
assert (trans.loc[lambda x: x['churn30']==True].groupby(['msno', 'transaction_date'])['msno'].count().max() == 1)
# Churn: Leaves forever
trans = (trans
.assign(max_trans_date=lambda x: x.groupby('msno')['transaction_date'].transform('max'))
.assign(final_churn=(lambda x:
(x['max_trans_date'] <= last_churn_date) &
(x['transaction_date'] == x['max_trans_date']) &
(x['membership_expire_date'] <= last_churn_date)
)))
# Churn: From training set
trans = (trans
.merge(train, how='left', on='msno')
.assign(train_churn=lambda x: x['is_churn'].fillna(0).astype('bool'))
.drop('is_churn', axis=1)
.assign(train_churn=lambda x: (x['max_trans_date'] == x['transaction_date']) & x['train_churn'])
.assign(churn=lambda x: x['train_churn'] | x['churn30'] | x['final_churn']))
# Split individuals on churn
trans = (trans
.join(trans
.sort_values(['msno', 'transaction_date'])
.groupby('msno')[['churn30', 'membership_expire_date']].shift(1)
.rename(columns={'churn30': 'new_start', 'membership_expire_date': 'prev_mem_exp_date'})))
def number_of_new_starts(df):
return (df
.assign(new_start=lambda x: x['new_start'].astype('float'))
.sort_values(['msno', 'transaction_date'])
.groupby('msno')
['new_start'].cumsum().fillna(0.)
.astype('int'))
def days_between_subs(df):
diff = (df['transaction_date'] - df['prev_mem_exp_date']).dt
diff = diff.total_seconds() / (60 * 60 * 24)
df = df.assign(days_between_subs=diff)
df.loc[lambda x: x['new_start'] != True, 'days_between_subs'] = np.nan
return df['days_between_subs']
trans = (trans
.assign(n_prev_churns=lambda x: number_of_new_starts(x),
days_between_subs=lambda x: days_between_subs(x)))
# Set start times
trans = (trans
.assign(start_date=trans.groupby(['msno', 'n_prev_churns'])['transaction_date'].transform('min'))
.assign(first_churn=lambda x: (x['n_prev_churns'] == 0) & (x['churn'] == True)))
        # Get only the last transactions (per churn)
indivs = (trans
.assign(censored=lambda x: x.groupby('msno')['churn'].transform('sum') == 0)
.assign(last_censored=(lambda x:
(x['censored'] == True) &
(x['transaction_date'] == x['max_trans_date'])
))
.loc[lambda x: x['last_censored'] | x['churn']]
.merge(members[['msno', 'registration_init_time']], how='left', on='msno'))
def time_diff_days(df, last, first):
return (df[last] - df[first]).dt.total_seconds() / (60 * 60 * 24)
indivs = (indivs
.assign(time=lambda x: time_diff_days(x, 'membership_expire_date', 'start_date'),
days_since_reg_init=lambda x: time_diff_days(x, 'start_date', 'registration_init_time')))
# When multiple transactions on last day, remove all but the last
indivs = indivs.loc[lambda x: x['transaction_date'] != x['next_trans_date']]
assert indivs.shape == indivs.drop_duplicates(['msno', 'transaction_date']).shape
assert (indivs['churn'] != indivs['censored']).all()
# Clean up and remove variables that are not from the first transaction day
dropcols = ['transaction_date', 'is_cancel', 'next_trans_date', 'max_trans_date', 'prev_mem_exp_date',
'censored', 'last_censored', 'churn30', 'final_churn', 'train_churn', 'membership_expire_date']
indivs = (indivs
.assign(churn_type=lambda x: 1*x['churn30'] + 2*x['final_churn'] + 4*x['train_churn'])
.assign(churn_type=lambda x:
np.array(['censoring', '30days', 'final', '30days_and_final', 'train', 'train_and_30',
'train_and_final', 'train_30_and_final'])[x['churn_type']])
.drop(dropcols, axis=1))
indivs = indivs.loc[lambda x: x['churn_type'] != 'train_30_and_final']
indivs = indivs.loc[lambda x: x['time'] > 0]
def as_category(df, columns):
return df.assign(**{col: df[col].astype('category') for col in columns})
def as_int(df, columns):
return df.assign(**{col: df[col].astype('int') for col in columns})
indivs = (indivs
.pipe(as_int, ['time'])
.pipe(as_category, ['new_start', 'churn_type']))
# indivs.reset_index(drop=True).to_feather(self._path_dir / 'survival_data.feather')
indivs.reset_index(drop=True).to_feather(self.path_survival)
def _make_survival_covariates(self):
# individs = pd.read_feather(self._path_dir / 'survival_data.feather')
individs = pd.read_feather(self.path_survival)
members = pd.read_feather(self._path_dir / 'members.feather')
trans = (individs
.merge(pd.read_feather(self._path_dir / 'transactions.feather'),
how='left', left_on=['msno', 'start_date'], right_on=['msno', 'transaction_date'])
.drop('transaction_date', axis=1) # same as start_date
.drop_duplicates(['msno', 'start_date'], keep='last') # keep last transaction on start_date (by idx)
)
assert trans.shape[0] == individs.shape[0]
def get_age_at_start(df):
fixed_date = | pd.datetime(2017, 3, 1) | pandas.datetime |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
data['Rating'].hist()
data = data[data['Rating'] <= 5]
data['Rating'].hist()
#Code ends here
# --------------
# code starts here
total_null = data.isnull().sum()
percent_null = total_null / data.isnull().count()
missing_data= | pd.concat([total_null , percent_null], keys=['Total','Percent'], axis=1) | pandas.concat |
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import cea.config
import cea.globalvar
import cea.inputlocator
from cea.optimization.constants import SIZING_MARGIN
from cea.technologies.solar.photovoltaic import calc_Cinv_pv, calc_Crem_pv
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def supply_system_configuration(generation, individual, locator, output_type_network, config):
district_supply_sys_columns = ['Lake_kW', 'VCC_LT_kW', 'VCC_HT_kW', 'single_effect_ACH_LT_kW',
'single_effect_ACH_HT_kW', 'DX_kW', 'CHP_CCGT_thermal_kW', 'SC_FP_m2', 'SC_ET_m2',
'PV_m2', 'Storage_thermal_kW', 'CT_kW', 'Capex_Centralized', 'Opex_Centralized',
'Capex_Decentralized', 'Opex_Decentralized']
district_supply_sys = pd.DataFrame(columns=district_supply_sys_columns)
# get supply system configuration of a particular individual
all_individuals = pd.read_csv(locator.get_optimization_all_individuals())
individual_system_configuration = all_individuals.loc[
(all_individuals['generation'].isin([generation])) & all_individuals['individual'].isin([individual])]
if output_type_network == "DH":
raise ValueError('This function is not ready for DH yet.')
# TODO: update the results from optimization for DH (not available at the moment)
network_name = 'DHN'
network_connected_buildings, decentralized_buildings = calc_building_lists(individual_system_configuration,
network_name)
centralized_cost_detail_heating = pd.read_csv(
locator.get_optimization_slave_investment_cost_detailed(individual, generation))
if output_type_network == "DC":
network_name = 'DCN'
network_connected_buildings, decentralized_buildings = calc_building_lists(individual_system_configuration,
network_name)
# get centralized system
cen_cooling_sys_detail = calc_cen_supply_sys_cooling(generation, individual, district_supply_sys_columns,
locator)
district_supply_sys = district_supply_sys.append(cen_cooling_sys_detail)
# get supply systems at decentralized buildings
for building in decentralized_buildings:
bui_cooling_sys_config = calc_building_supply_system(individual_system_configuration, network_name)
district_supply_sys = district_supply_sys.append(
calc_bui_sys_decentralized(building, bui_cooling_sys_config, district_supply_sys_columns, locator,
config))
# get supply systems at network connected buildings
for building in network_connected_buildings:
district_supply_sys = district_supply_sys.append(
calc_bui_sys_network_connected(building, district_supply_sys_columns, locator, config))
building_connectivity = pd.DataFrame({"Name": network_connected_buildings + decentralized_buildings,
"Type": ["CENTRALIZED" for x in network_connected_buildings] +
["DECENTRALIZED" for x in decentralized_buildings]})
return district_supply_sys, building_connectivity
def calc_bui_sys_network_connected(building, district_supply_sys_columns, locator, config):
bui_sys_detail = pd.DataFrame(columns=district_supply_sys_columns, index=[building])
bui_sys_detail = bui_sys_detail.fillna(0.0)
Capex_a_PV, Opex_a_PV, PV_installed_area_m2 = calc_pv_costs(building, config, locator)
bui_sys_detail.loc[building, 'PV_m2'] = PV_installed_area_m2
bui_sys_detail.loc[building, 'Capex_Decentralized'] = Capex_a_PV
bui_sys_detail.loc[building, 'Opex_Decentralized'] = Opex_a_PV
return bui_sys_detail
def calc_cen_costs_cooling(generation, individual, locator):
cooling_costs = pd.read_csv(
locator.get_optimization_slave_investment_cost_detailed_cooling(individual, generation))
capex_a_columns = [item for item in cooling_costs.columns if 'Capex_a' in item]
cen_capex_a = cooling_costs[capex_a_columns].sum(axis=1).values[0]
opex_a_columns = [item for item in cooling_costs.columns if 'Opex' in item]
cen_opex_a = cooling_costs[opex_a_columns].sum(axis=1).values[0]
return cen_capex_a, cen_opex_a
def calc_cen_supply_sys_cooling(generation, individual, district_supply_sys_columns, locator):
cooling_activation_column = ['Q_from_ACH_W', 'Q_from_Lake_W', 'Q_from_VCC_W', 'Q_from_VCC_backup_W',
'Q_from_storage_tank_W', 'Qc_CT_associated_with_all_chillers_W',
'Qh_CCGT_associated_with_absorption_chillers_W']
cooling_activation_pattern = pd.read_csv(
locator.get_optimization_slave_cooling_activation_pattern(individual, generation))
cen_cooling_sys = cooling_activation_pattern[cooling_activation_column].max()
cen_cooling_sys_detail = pd.DataFrame(columns=district_supply_sys_columns, index=['Centralized Plant'])
cen_cooling_sys_detail = cen_cooling_sys_detail.fillna(0.0)
cen_cooling_sys_detail.loc['Centralized Plant', 'Lake_kW'] = cen_cooling_sys['Q_from_Lake_W'] / 1000 # to kW
cen_cooling_sys_detail.loc['Centralized Plant', 'VCC_LT_kW'] = (cen_cooling_sys['Q_from_VCC_W'] + cen_cooling_sys[
'Q_from_VCC_backup_W']) / 1000 # to kW
cen_cooling_sys_detail.loc['Centralized Plant', 'single_effect_ACH_HT_kW'] = cen_cooling_sys[
'Q_from_ACH_W'] / 1000 # to kW
cen_cooling_sys_detail.loc['Centralized Plant', 'Storage_thermal_kW'] = cen_cooling_sys[
'Q_from_storage_tank_W'] / 1000 # to kW
cen_cooling_sys_detail.loc['Centralized Plant', 'CT_kW'] = cen_cooling_sys[
'Qc_CT_associated_with_all_chillers_W'] / 1000 # to kW
cen_cooling_sys_detail.loc['Centralized Plant', 'CHP_CCGT_thermal_kW'] = cen_cooling_sys[
'Qh_CCGT_associated_with_absorption_chillers_W'] / 1000 # to kW
cen_capex_a, cen_opex_a = calc_cen_costs_cooling(generation, individual, locator)
cen_cooling_sys_detail.loc['Centralized Plant', 'Capex_Centralized'] = cen_capex_a
cen_cooling_sys_detail.loc['Centralized Plant', 'Opex_Centralized'] = cen_opex_a
return cen_cooling_sys_detail
# def calc_cen_supply_sys_electricity(network_name, generation, individual, locator):
# if network_name == 'DCN':
# el_activation_columns = ['Area_PV_m2', 'E_CHP_to_directload_W', 'E_CHP_to_grid_W', 'E_PV_W', 'E_from_grid_W']
# el_activation_pattern = pd.read_csv(
# locator.get_optimization_slave_electricity_activation_pattern_cooling(individual, generation))
# el_sys_activation_pattern = el_activation_pattern[el_activation_columns]
# el_sys_activation_pattern['E_CHP_W'] = el_activation_pattern['E_CHP_to_directload_W'] + el_activation_pattern[
# 'E_CHP_to_grid_W']
# el_sys_activation_pattern.drop('E_CHP_to_directload_W', axis=1, inplace=True)
# el_sys_activation_pattern.drop('E_CHP_to_grid_W', axis=1, inplace=True)
# cen_el_supply_sys = el_sys_activation_pattern.max()
# elif network_name == 'DHN':
# el_activation_columns = ['Area_PV_m2', 'E_CHP_to_directload_W', 'E_CHP_to_grid_W', 'E_PV_W', 'E_from_grid_W']
# el_activation_pattern = pd.read_csv(
# locator.get_optimization_slave_electricity_activation_pattern_heating(individual, generation))
# el_sys_activation_pattern = el_activation_pattern[el_activation_columns]
# el_sys_activation_pattern['E_CHP_W'] = el_activation_pattern['E_CHP_to_directload_W'] + el_activation_pattern[
# 'E_CHP_to_grid_W']
# el_sys_activation_pattern.drop('E_CHP_to_directload_W', axis=1, inplace=True)
# el_sys_activation_pattern.drop('E_CHP_to_grid_W', axis=1, inplace=True)
# cen_el_supply_sys = el_sys_activation_pattern.max()
# else:
# raise ValueError('Wrong network_name')
#
# return cen_el_supply_sys
def calc_building_lists(individual_system_configuration, network_name):
buildings_list = [item for item in individual_system_configuration.columns if network_name in item and 'B' in item]
buildings_df = individual_system_configuration[buildings_list]
network_connected_buildings = buildings_df[buildings_df[buildings_list] > 0.00000].dropna(axis=1).columns
network_connected_buildings = [x.split()[0] for x in network_connected_buildings]
decentralized_buildings = buildings_df[buildings_df[buildings_list] < 1.00000].dropna(axis=1).columns
decentralized_buildings = [x.split()[0] for x in decentralized_buildings]
return network_connected_buildings, decentralized_buildings
def calc_building_supply_system(individual_system_configuration, network_name):
all_configuration_dict = {1: "ARU_SCU", 2: "AHU_SCU", 3: "AHU_ARU", 4: "SCU", 5: "ARU", 6: "AHU", 7: "AHU_ARU_SCU"}
unit_configuration_name = network_name + ' unit configuration'
unit_configuration = int(individual_system_configuration[unit_configuration_name].values[0])
if unit_configuration in all_configuration_dict:
decentralized_config = all_configuration_dict[unit_configuration]
else:
raise ValueError('DCN unit configuration does not exist.')
return decentralized_config
def calc_bui_sys_decentralized(building, bui_sys_config, district_supply_sys_columns, locator, config):
# get nominal power and costs from disconnected calculation
bui_results = pd.read_csv(
locator.get_optimization_decentralized_folder_building_result_cooling(building, bui_sys_config))
bui_results_best = bui_results[bui_results['Best configuration'] > 0.0]
technology_columns = [item for item in bui_results_best.columns if 'Nominal Power' in item]
cost_columns = [item for item in bui_results_best.columns if 'Costs' in item]
technology_columns.extend(cost_columns)
bui_results_best = bui_results_best[technology_columns].reset_index(drop=True)
# write building system configuration to output
bui_sys_detail = | pd.DataFrame(columns=district_supply_sys_columns, index=[building]) | pandas.DataFrame |
import sys
import matplotlib.pyplot as plt
import pandas as pd
import seaborn
def print_as_comment(obj):
print("\n".join(f"# {line}" for line in str(obj).splitlines()))
if __name__ == "__main__":
sys.path.append("../..")
seaborn.set_style("whitegrid")
# ---
import pandas as pd
import epymetheus as ep
from epymetheus.benchmarks import dumb_strategy
# ---
my_strategy = ep.create_strategy(dumb_strategy, profit_take=20.0, stop_loss=-10.0)
# ---
from epymetheus.datasets import fetch_usstocks
universe = fetch_usstocks()
print(">>> universe.head()")
print_as_comment(universe.head())
print(">>> my_strategy.run(universe)")
my_strategy.run(universe)
# ---
df_history = my_strategy.history()
df_history.head()
print(">>> df_history.head()")
print_as_comment(df_history.head())
# ---
series_wealth = my_strategy.wealth()
print(">>> series_wealth.head()")
print_as_comment(series_wealth.head())
plt.figure(figsize=(16, 4))
plt.plot(series_wealth, linewidth=1)
plt.xlabel("date")
plt.ylabel("wealth [USD]")
plt.title("Wealth")
plt.savefig("wealth.png", bbox_inches="tight", pad_inches=0.1)
# ---
print(">>> my_strategy.score('final_wealth')")
print_as_comment(my_strategy.score("final_wealth"))
print(">>> my_strategy.score('max_drawdown')")
print_as_comment(my_strategy.score("max_drawdown"))
# my_strategy.score("sharpe_ratio")
# ---
drawdown = my_strategy.drawdown()
exposure = my_strategy.net_exposure()
plt.figure(figsize=(16, 4))
plt.plot(pd.Series(drawdown, index=universe.index), linewidth=1)
plt.xlabel("date")
plt.ylabel("drawdown [USD]")
plt.title("Drawdown")
plt.savefig("drawdown.png", bbox_inches="tight", pad_inches=0.1)
plt.figure(figsize=(16, 4))
plt.plot( | pd.Series(exposure, index=universe.index) | pandas.Series |
import logging
import os
import gc
import pandas as pd
from src.data_models.tdidf_model import FrequencyModel
from src.evaluations.statisticalOverview import StatisticalOverview
from src.globalVariable import GlobalVariable
from src.kemures.tecnics.content_based import ContentBased
from src.preprocessing.preprocessing import Preprocessing
def execute_one_time():
Preprocessing.database_evaluate_graph()
scenario = GlobalVariable.ONE_SCENARIO_SIZE
scenario_class_df = pd.DataFrame()
scenario_results_df = pd.DataFrame()
for run in range(GlobalVariable.RUN_TIMES):
os.system('cls||clear')
logger.info("+ Rodada " + str(run + 1))
logger.info("+ Carregando o Cenário com " + str(scenario))
songs_base_df, users_preference_base_df = Preprocessing.load_data_test(scenario)
run_class_df, run_results_df = ContentBased.run_recommenders(
users_preference_base_df, FrequencyModel.mold(songs_base_df), scenario, run + 1
)
scenario_results_df = pd.concat([scenario_results_df, run_results_df])
scenario_class_df = pd.concat([scenario_class_df, run_class_df])
StatisticalOverview.result_info(scenario_results_df)
StatisticalOverview.graphics(scenario_results_df)
os.system('cls||clear')
StatisticalOverview.comparate(scenario_results_df)
def execute_by_scenarios():
Preprocessing.database_evaluate_graph()
application_class_df = pd.DataFrame()
application_results_df = pd.DataFrame()
for scenario in GlobalVariable.SCENARIO_SIZE_LIST:
gc.collect()
scenario_class_df = pd.DataFrame()
scenario_results_df = pd.DataFrame()
for run in range(GlobalVariable.RUN_TIMES):
os.system('cls||clear')
logger.info("+ Rodada " + str(run + 1))
logger.info("+ Carregando o Cenário com " + str(scenario))
songs_base_df, users_preference_base_df = Preprocessing.load_data_test(scenario)
run_class_df, run_results_df = ContentBased.run_recommenders(
users_preference_base_df, FrequencyModel.mold(songs_base_df), scenario, run + 1
)
scenario_results_df = pd.concat([scenario_results_df, run_results_df])
scenario_class_df = pd.concat([scenario_class_df, run_class_df])
StatisticalOverview.result_info(scenario_results_df)
StatisticalOverview.graphics(scenario_results_df)
os.system('cls||clear')
StatisticalOverview.comparate(scenario_results_df)
application_results_df = pd.concat([scenario_results_df, application_results_df])
application_class_df = | pd.concat([scenario_class_df, application_class_df]) | pandas.concat |
from contextlib import contextmanager
from time import time, sleep
from .datasets import timeseries
from .ops import spatial_mean, temporal_mean, climatology, anomaly
from distributed import wait
from distributed.utils import format_bytes
import datetime
from distributed import Client
import pandas as pd
import logging
import os
logger = logging.getLogger()
logger.setLevel(level=logging.WARNING)
here = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
results_dir = os.path.join(here, "results")
class DiagnosticTimer:
def __init__(self):
self.diagnostics = []
@contextmanager
def time(self, **kwargs):
tic = time()
yield
toc = time()
kwargs["runtime"] = toc - tic
self.diagnostics.append(kwargs)
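    # Illustrative usage sketch (the keyword names below are arbitrary examples,
    # not part of the benchmark code):
    #
    #     timer = DiagnosticTimer()
    #     with timer.time(operation='spatial_mean', chunk_size='64MB'):
    #         spatial_mean(ds).compute()
    #     df = timer.dataframe()  # one row per timed block, including a 'runtime' column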
def dataframe(self):
return | pd.DataFrame(self.diagnostics) | pandas.DataFrame |
#!/usr/bin/python3
"""
This module is supposed to take care of visualizational
concerns especially if it is about plotting data
"""
from gisme import (figure_path, lon_col, lat_col, de_load, bbox,
variable_dictionary, data_path, nuts3_01res_shape,
nuts0_shape, isin_path, log, demography_file)
from gisme.WeatherReader import WeatherReader
from gisme.LoadReader import LoadReader
from gisme.Predictions import ARMAXForecast
from gisme.Utility import Utility
import os
import itertools
import pandas as pd
import numpy as np
import shapefile as shp
from datetime import datetime,timedelta
from descartes import PolygonPatch
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from matplotlib import pyplot as plt, colors, cm, markers, rc, rcParams
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
# ## for Palatino and other serif fonts use:
# #rc('font',**{'family':'serif','serif':['Palatino']})
# rc('text',usetex=True)
# plt.rc('text',usetex=True)
# plt.rc('font',family='serif')
class DataPlotter:
"""
handles plotting for load and weather data
Attributes
----------
fmt : string
preferred output format of plots such as 'pdf', 'eps', 'jpg' or
similar supported by matplotlib
save : boolean
whether to save plot or not
show : boolean
whether to display plot or not
shape : integer tuple
for map multiplot, specify arrangement as tuple of length 2
isin : boolean
for map plot, whether to filter by isinDE mask
wreader : WeatherReader
used to load weather data
lreader : LoadReader
used to load load data
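    Examples
    --------
    A minimal usage sketch ('t2m' is only a placeholder for one of the variable
    names returned by `WeatherReader.get_vars()`):
        plotter = DataPlotter(fmt='pdf', save=True, show=False, isin=True)
        plotter.plot_nmax('t2m', n=4)
        plotter.plot_isin()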
"""
def __init__(self, fmt='pdf', save=True, show=False, shape=None, isin=False):
"""Initializes WeatherPlot instance
Parameters
----------
fmt : string
format that figure is saved to
save : boolean
whether to save plots to file or not
show : boolean
whether to show plots or not
shape : tuple
if multiplot should be plotted,specify shape
isin : boolean
whether to filter values from outside germany
Returns
-------
None
"""
self.fmt = fmt
self.save = save
self.show = show
self.shape = shape
self.isin = isin
self.wreader = WeatherReader(self.isin)
self.lreader = LoadReader()
# #change font size depending on size of plot
# if self.shape is not None:
# rcParams.update({'font.size': 18./np.round(np.sqrt(self.shape[0]*self.shape[1]))})
# else:
# rcParams.update({'font.size': 18.})
def __save_show_fig(self, fig, dir_pth, file_name):
"""Save and show the passed figure if specified and finally close it
Parameters
----------
fig : matplotlib.figure.Figure
the created figure
dir_pth : string
the path to the directory to save to
file_name : string
the file name to save to
Returns
-------
None
"""
if self.save:
log.info(f'saving plot in {file_name}')
if not os.path.exists(dir_pth):
os.makedirs(dir_pth)
if type(self.fmt) is list:
for f in self.fmt:
fig.savefig(f'{file_name}.{f}', bbox_inches='tight', format=f, optimize=True, dpi=150)
else:
fig.savefig(f'{file_name}.{self.fmt}', bbox_inches='tight', format=self.fmt, optimize=True, dpi=150)
if self.show:
plt.show()
# close figures as they won't be closed automatically by python during runtime
plt.close(fig)
def __create_ax_map(self, ax, variable, time, norm, xlbl_true=None, ylbl_true=None):
"""Plot a map of germany on the given axis
Parameters
----------
ax : matplotlib.axes.Axes
the axis used to plot
variable : string
the name of the weather variable
time : datetime.datetime
the time for which to plot the variable
norm : matplotlib.colors.BoundaryNorm
the norm for color distribution
xlbl_true : bool
            whether to plot a label for the x-axis or not
        ylbl_true : bool
            whether to plot a label for the y-axis or not
Returns
-------
None
"""
if self.isin:
data = self.wreader.vals4time(variable, time, isin=True)
data.plot.imshow(ax=ax, cmap='jet', extent=bbox, norm=norm, add_colorbar=False)
else:
data = self.wreader.vals4time(variable, time)
ax.imshow(data.values, cmap='jet', extent=bbox, interpolation='bilinear', norm=norm)
# read shapefile
eu_shape = shp.Reader(nuts0_shape)
de_shape = None
for record in eu_shape.shapeRecords():
if 'DE' in record.record:
de_shape = record
break
if de_shape is None:
raise Exception('shape for germany could not be found!')
# concatenate points so that single lines can be drawn
state = de_shape.shape
points = np.array(state.points)
intervals = list(state.parts) + [len(state.points)]
for (x, y) in zip(intervals[:-1], intervals[1:]):
ax.plot(*zip(*points[x:y]), color='k', linewidth=2)
ax.set_title(pd.to_datetime(time).strftime("%Y/%m/%d %HH"))
# print x and y label only if is most left/lowest plot
if xlbl_true:
ax.set_xlabel(lon_col)
else:
ax.set_xlabel(None)
if ylbl_true:
ax.set_ylabel(lat_col)
else:
ax.set_ylabel(None)
# set ticks for x and y in order to display the grid
xticks = np.linspace(bbox[0], bbox[1], (bbox[1] - bbox[0]) * 4 + 1)
ax.set_xticks(xticks, minor=True)
yticks = np.linspace(bbox[2], bbox[3], (bbox[3] - bbox[2]) * 4 + 1)
ax.set_yticks(yticks, minor=True)
# plot own grid
xgrid = np.linspace(bbox[0]+.125, bbox[1] - .125, num=(bbox[1] - bbox[0]) * 4)
ygrid = np.linspace(bbox[2]+.125, bbox[3] - .125, num=(bbox[3] - bbox[2]) * 4)
for xpoint in xgrid:
ax.axvline(xpoint, alpha=.2, color='k', linewidth=.5, linestyle='--')
for ypoint in ygrid:
ax.axhline(ypoint, alpha=.2, color='k', linewidth=.5, linestyle='--')
def __plot_days(self, days, fname):
"""Plot data for each day in days list and save file with specified format
        About the file name format for single days:
        the leading number indicates the position in terms of min/max
        --> for min, 0 means it is the smallest value;
        for max, the highest number corresponds to the highest value.
Parameters
----------
days : list
list of days
fname : string
to name folder
Returns
-------
None
"""
assert (self.shape is None or self.shape[0]*self.shape[1] == len(days)),\
"shape of plot must fit with number of plots"
var = days.name
cm_jet = cm.get_cmap('jet')
vmin, vmax = self.wreader.get_minmax(var)
norm = colors.BoundaryNorm(np.linspace(vmin, vmax, 256), ncolors=256)
cbox_ticks = np.linspace(vmin, vmax, 8)
smap = cm.ScalarMappable(norm=norm, cmap=cm_jet)
if self.shape is not None:
fig, axs = plt.subplots(*self.shape, constrained_layout=True)
day_list = list(days['time'].values)
for xcoord in range(0, self.shape[1]):
for ycoord in range(0, self.shape[0]):
ax = axs[xcoord, ycoord]
day = day_list.pop()
self.__create_ax_map(ax, var, day, norm, xcoord == (self.shape[1] - 1), ycoord == 0)
cbar = fig.colorbar(smap, ticks=cbox_ticks, ax=axs.ravel().tolist())
cbar.set_label(self.wreader.get_long_name(var))
dir_pth = os.path.join(figure_path, var, 'bundles')
file_name = os.path.join(dir_pth, f'{fname}{len(days)}_maps{"_isin" if self.isin else ""}')
self.__save_show_fig(fig, dir_pth, file_name)
else:
for day_num, day in enumerate(days["time"].values):
fig, ax = plt.subplots()
self.__create_ax_map(ax, var, day, norm, xlbl_true=True, ylbl_true=True)
cbar = fig.colorbar(smap, ticks=cbox_ticks)
cbar.set_label(self.wreader.get_long_name(var))
dir_pth = os.path.join(figure_path, var, fname)
file_name = os.path.join(dir_pth, f'{day_num}_map{"_isin" if self.isin else ""}')
self.__save_show_fig(fig, dir_pth, file_name)
def plot_nmin(self, var, n=4):
"""Plot/save the n days with the smallest values for the specified
variable reduced over longitude and latitude
Parameters
----------
var : string
name of variable
n : integer
number of plots
Returns
-------
None
"""
assert var in self.wreader.get_vars(), f"variable '{var}' not found"
days = self.wreader.nmin_val_days(var, n)
self.__plot_days(days, 'min')
def plot_nmax(self, var, n=4):
"""Plot/save the n days with the largest values for the specified
variable reduced over longitude and latitude
Parameters
----------
var : string
name of variable
n : integer
number of plots
Returns
-------
None
"""
assert var in self.wreader.get_vars(), f"variable '{var}' not found"
days = self.wreader.nmax_val_days(var, n)
self.__plot_days(days, 'max')
def plot_nmin_var(self, var, n=4):
"""Plot/save the n days with the smallest variance for the specified
variable reduced over longitude and latitude
Parameters
----------
var : string
name of variable
n : integer
number of plots
Returns
-------
None
"""
assert var in self.wreader.get_vars(), f"variable '{var}' not found"
days = self.wreader.nminvar_val_days(var, n)
self.__plot_days(days, 'minvar')
def plot_nmax_var(self, var, n=4):
"""Plot/save the n days with the largest variance for the specified
variable reduced over longitude and latitude
Parameters
----------
var : string
name of variable
n : integer
number of plots
Returns
-------
None
"""
assert var in self.wreader.get_vars(), f"variable '{var}' not found"
days = self.wreader.nmaxvar_val_days(var, n)
self.__plot_days(days, 'maxvar')
def plot_nmin_mean(self, var, n=4):
"""Plot/save the n days with the smallest mean for the specified
variable reduced over longitude and latitude
Parameters
----------
var : string
name of variable
n : integer
number of plots
Returns
-------
None
"""
assert var in self.wreader.get_vars(), f"variable '{var}' not found"
days = self.wreader.nminmean_val_days(var, n)
self.__plot_days(days, 'minmean')
def plot_nmax_mean(self, var, n=4):
"""Plot/save the n days with the largest mean for the specified
variable reduced over longitude and latitude
Parameters
----------
var : string
name of variable
n : integer
number of plots
Returns
-------
None
"""
assert var in self.wreader.get_vars(), f"variable '{var}' not found"
days = self.wreader.nmaxmean_val_days(var, n)
self.__plot_days(days, 'maxmean')
def plot_nmin_med(self, var, n=4):
"""Plot/save the n days with the smallest median for the specified
variable reduced over longitude and latitude
Parameters
----------
var : string
name of variable
n : integer
number of plots
Returns
-------
None
"""
assert var in self.wreader.get_vars(), f"variable '{var}' not found"
days = self.wreader.nminmed_val_days(var, n)
self.__plot_days(days, 'minmed')
def plot_nmax_med(self, var, n=4):
"""Plot/save the n days with the largest median for the specified
variable reduced over longitude and latitude
Parameters
----------
var : string
name of variable
n : integer
number of plots
Returns
-------
None
"""
assert var in self.wreader.get_vars(), f"variable '{var}' not found"
days = self.wreader.nmaxmed_val_days(var, n)
self.__plot_days(days, 'maxmed')
def plot_nmin_sum(self, var, n=4):
"""Plot/save the n days with the smallest sum for the specified
variable reduced over longitude and latitude
Parameters
----------
var : string
name of variable
n : integer
number of plots
Returns
-------
None
"""
assert var in self.wreader.get_vars(), f"variable '{var}' not found"
days = self.wreader.nminsum_val_days(var, n)
self.__plot_days(days, 'minsum')
def plot_nmax_sum(self, var, n=4):
"""Plot/save the n days with the largest sum for the specified
variable reduced over longitude and latitude
Parameters
----------
var : string
name of variable
n : integer
number of plots
Returns
-------
None
"""
assert var in self.wreader.get_vars(), f'variable "{var}" not found'
days = self.wreader.nmaxsum_val_days(var, n)
self.__plot_days(days, 'maxsum')
def plot_isin(self):
"""Plot map showing which grid points are within germany
Returns
-------
None
"""
try:
contained = np.load(os.path.join(isin_path, 'isinDE.npy'))
except:
log.info(f'isin file not found in {data_path}')
util = Utility()
contained = util.check_isinDE()
fig, ax = plt.subplots()
ax.imshow(contained, cmap=plt.cm.Greys, extent=bbox)
ax.set_ylabel(lat_col)
ax.set_xlabel(lon_col)
file_name = os.path.join(figure_path, 'isinDE')
self.__save_show_fig(fig, figure_path, file_name)
def plot_isin_region(self, region_id):
"""Plot map showing which grid points are within specified region
Parameters
----------
region_id : string
the id of the region as string
Returns
-------
None
"""
util = Utility()
try:
contained = np.load(os.path.join(isin_path, f'isin{region_id}.npy'))
except:
log.info(f'isin file not found in {isin_path} for region {region_id}')
contained = util.check_isin_region(region_id)
fig, ax = plt.subplots()
ax.imshow(contained, cmap=plt.cm.Greys, extent=bbox)
ax.set_ylabel(lat_col)
ax.set_xlabel(lon_col)
file_name = os.path.join(figure_path, f'isin{util.get_region_name(region_id)}_{region_id}')
self.__save_show_fig(fig, figure_path, file_name)
def plot_isin_top_n(self, n, year):
"""Plot map showing which grid points are within n regions with highest population for specified year
Parameters
----------
n : integer
number of regions with highest population to plot
year : integer
specifies for which year population is checked
Returns
-------
None
"""
util = Utility()
        contained = util.demo_top_n_regions_map(n, year)
fig, ax = plt.subplots()
ax.imshow(contained, cmap=plt.cm.Greys, extent=bbox)
ax.set_ylabel(lat_col)
ax.set_xlabel(lon_col)
file_name = os.path.join(figure_path, f'isin_top{n}_year{year}')
self.__save_show_fig(fig, figure_path, file_name)
def plot_demo4year(self, year):
"""Plot a map of germany showing regional population data on NUTS 3 level
Parameters
----------
year : int
name of variable
Returns
-------
None
"""
assert (year >= 2015 and year <=2018), "demography data only existing from 2015 to 2018"
demo_df = pd.read_csv(demography_file, encoding='latin1', index_col='GEO')
demo_df['Value'] = demo_df['Value'].map(lambda val: pd.NaT if val == ':' else float(val.replace(',', '')))
df = demo_df[demo_df['TIME'] == year]
with shp.Reader(nuts3_01res_shape) as nuts3_sf:
regions = [rec for rec in nuts3_sf.shapeRecords() if rec.record['CNTR_CODE'] == 'DE']
values = np.array([df.loc[region.record['NUTS_ID'], :]['Value'] for region in regions]) / 1000
_min = values.min()
_max = values.max()
fig, ax = plt.subplots()
plt.xlim([5.5, 15.5])
plt.ylim([47, 55.5])
ax.set_xlabel(lon_col)
ax.set_ylabel(lat_col)
# for logarithmic colorbar
cbox_bound = np.exp(np.linspace(np.log(_min), np.log(_max), 256))
norm = colors.BoundaryNorm(cbox_bound, ncolors=256)
sm = cm.ScalarMappable(norm=norm, cmap=cm.get_cmap('jet'))
cbar = plt.colorbar(sm)
cbar.set_label('inhabitants (in 1k)')
for value, region in zip(values, regions):
ax.add_patch(PolygonPatch(region.shape.__geo_interface__, fc=sm.to_rgba(value), ec='none'))
dir_pth = os.path.join(figure_path, 'demo')
file_name = os.path.join(dir_pth, f'demo{year}_logscale')
self.__save_show_fig(fig, dir_pth, file_name)
def plot_load_acf(self, lags=48, hour_steps=1, ndiff=0):
"""Plot autocorrelation plot of load data within given time range
and given lags, hour steps and number of differences
Parameters
----------
lags : integer
specifies number of lags shown on plot
hour_steps : integer
specifies time steps in data in hours
ndiff : integer
specifies how often to differentiate before plotting
Returns
-------
None
"""
data = self.lreader.vals4step(de_load, step=hour_steps).interpolate_na(dim='utc_timestamp', method='linear')\
.diff(dim='utc_timestamp', n=ndiff).values
fig = plot_acf(data, fft=True, use_vlines=True, lags=lags)
dir_pth = os.path.join(figure_path, 'ACF')
file_name = os.path.join(dir_pth, f'load_{lags}lags_ndiff{ndiff}_hstep{hour_steps}')
self.__save_show_fig(fig, dir_pth, file_name)
def plot_load_pacf(self, lags=48, hour_steps=1, ndiff=0):
"""Plot partial autocorrelation plot of load data within given time
        range and given lags, hour steps and number of differences
Parameters
----------
lags : integer
specifies number of lags shown on plot
hour_steps : integer
specifies time steps in data in hours
ndiff : integer
specifies how often to differentiate before plotting
Returns
-------
None
"""
data = self.lreader.vals4step(de_load, step=hour_steps).interpolate_na(dim='utc_timestamp', method='linear')\
.diff(dim='utc_timestamp', n=ndiff).values
fig = plot_pacf(data, use_vlines=True, lags=lags)
dir_pth = os.path.join(figure_path, 'PACF')
file_name = os.path.join(dir_pth, f'load_{lags}lags_ndiff{ndiff}_hstep{hour_steps}')
self.__save_show_fig(fig, dir_pth, file_name)
def plot_load_time_func(self, var, start, stop, func, load_col=de_load,
freq=24, aspect=(12, 5), skip_bottom_labels=False):
"""Plot/save function of load and date with variable
after applying given function to its data
Parameters
----------
var : string
name of variable to plot
start : pandas.Timestamp
starting time (e.g. start = pandas.Timestamp(datetime(2015,1,1,12),tz='utc'))
stop : pandas.Timestamp
stopping time
func : function object
function applied to weather data to reduce over longitude and latitude
load_col : string
specifies column in load file that will be plotted
freq : integer (where freq mod 2 == 0, as resolution of data is 2H)
specifies in what frequency of hours points will be plotted
aspect : tuple of ints
defines the aspect ratio of the plot
skip_bottom_labels : boolean
specifies whether to skip the bottom label or not
Returns
-------
None
"""
        assert (freq % 2 == 0), "frequency must be divisible by 2 as resolution of data is 2h"
fname = func.__name__
rng = | pd.date_range(start, stop, freq=f'{freq}H') | pandas.date_range |
# -*- coding: utf-8 -*-
"""
This module holds Classes and Functions for solving linear optimisation
problems based on tabular data.
Please use this module with care. It is work in progress and not properly
tested yet!
Contact: <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import datetime
import logging
import os
import pickle
import warnings
from copy import deepcopy
import oemof.solph as solph
import pandas as pd
from .external import Scenario
from .postprocessing import analyse_bus
from .postprocessing import analyse_costs
from .postprocessing import analyse_emissions
from .postprocessing import get_all_sequences
from .postprocessing import get_boundary_flows
from .postprocessing import get_trafo_flow
from .setup_model import add_buses
from .setup_model import add_links
from .setup_model import add_sinks
from .setup_model import add_sinks_fix
from .setup_model import add_sources
from .setup_model import add_sources_fix
from .setup_model import add_storages
from .setup_model import add_transformer
from .setup_model import check_active
from .setup_model import check_nonconvex_invest_type
from .setup_model import load_csv_data
class DistrictScenario(Scenario):
"""Scenario class for urban energy systems"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.input_path = kwargs.get("input_path", None)
self.emission_limit = kwargs.get("emission_limit", None)
self.location = kwargs.get("location", None)
self.number_of_time_steps = \
kwargs.get("number_of_time_steps", 10)
self.results = dict()
def load_csv(self, path=None):
if path is not None:
self.location = path
self.table_collection = load_csv_data(self.location)
return self
def check_input(self):
self.table_collection = check_active(self.table_collection)
self.table_collection = check_nonconvex_invest_type(
self.table_collection)
return self
def initialise_energy_system(self):
"""Initialises the oemof.solph Energysystem."""
date_time_index = pd.date_range(
"1/1/{0}".format(self.year),
periods=self.number_of_time_steps,
freq="H"
)
self.es = solph.EnergySystem(timeindex=date_time_index)
def create_nodes(self):
nd = self.table_collection
nod, busd = add_buses(nd['Bus'])
nod.extend(
add_sources(nd['Source'], busd, nd['Timeseries']) +
add_sources_fix(nd['Source_fix'], busd, nd['Timeseries']) +
add_sinks(nd['Sink'], busd, nd['Timeseries']) +
add_sinks_fix(nd['Sink_fix'], busd, nd['Timeseries']) +
add_storages(nd['Storages'], busd) +
add_transformer(nd['Transformer'], busd, nd['Timeseries'])
)
if 'Link' in nd.keys():
nod.extend(add_links(nd['Link'], busd))
return nod
def table2es(self):
if self.es is None:
self.initialise_energy_system()
self.check_input()
nodes = self.create_nodes()
self.es.add(*nodes)
def add_emission_constr(self):
if self.emission_limit is not None:
if self.model is not None:
solph.constraints.generic_integral_limit(
self.model, keyword='emission_factor',
limit=self.emission_limit)
else:
ValueError("The model must be created first.")
return self
def add_couple_invest_contr(self, couple_invest_flow):
"""
        Adds a solph.constraint for coupling investment flows.
syntax of couple_invest_flow:
couple_invest_flow={
'flow1': ("label_from", "label_to"),
'flow2': ("label_from", "label_to"),
}
        Make sure that these flows are InvestmentFlows.
"""
flow1_from = self.es.groups[couple_invest_flow['flow1'][0]]
flow1_to = self.es.groups[couple_invest_flow['flow1'][1]]
investflow_1 = \
self.model.InvestmentFlow.invest[flow1_from, flow1_to]
flow2_from = self.es.groups[couple_invest_flow['flow2'][0]]
flow2_to = self.es.groups[couple_invest_flow['flow2'][1]]
investflow_2 = \
self.model.InvestmentFlow.invest[flow2_from, flow2_to]
solph.constraints.equate_variables(
self.model,
investflow_1,
investflow_2,
factor1=1,
name="couple_investment_flows"
)
def solve(self, with_duals=False, tee=True, logfile=None, solver=None,
couple_invest_flow=None, **kwargs):
if self.es is None:
self.table2es()
self.create_model()
self.add_emission_constr()
if couple_invest_flow is not None:
self.add_couple_invest_contr(couple_invest_flow)
logging.info("Optimising using {0}.".format(solver))
if with_duals:
self.model.receive_duals()
if self.debug:
filename = os.path.join(
solph.helpers.extend_basic_path("lp_files"), "q100opt.lp"
)
self.model.write(
filename, io_options={"symbolic_solver_labels": True}
)
logging.info("Store lp-file in {0}.".format(filename))
solver_kwargs = {
"solver_cmdline_options": kwargs.get(
"solver_cmdline_options", {})}
self.model.solve(
solver=solver, solve_kwargs={"tee": tee, "logfile": logfile},
**solver_kwargs
)
# store directly at district energy system
self.results["main"] = solph.processing.results(self.model)
self.results["meta"] = solph.processing.meta_results(self.model)
self.results["param"] = solph.processing.parameter_as_dict(self.es)
self.results["meta"]["scenario"] = self.scenario_info(solver)
if self.location is not None:
self.results["meta"]["in_location"] = self.location
self.results['meta']["datetime"] = datetime.datetime.now()
self.results["meta"]["solph_version"] = solph.__version__
self.results['meta']['emission_limit'] = self.emission_limit
self.results['meta']['solver']['solver'] = solver
self.results['costs'] = self.model.objective()
self.results['table_collection'] = self.table_collection
if hasattr(self.model, 'integral_limit_emission_factor'):
self.results['emissions'] = \
self.model.integral_limit_emission_factor()
self.results['timeindex'] = self.es.timeindex
def plot(self):
pass
def tables_to_csv(self, path=None):
"""Dump scenario into a csv-collection."""
if path is None:
bpath = os.path.join(os.path.expanduser("~"), ".q100opt")
if not os.path.isdir(bpath):
os.mkdir(bpath)
dpath = os.path.join(bpath, "dumps")
if not os.path.isdir(dpath):
os.mkdir(dpath)
path = os.path.join(dpath, "csv_export")
if not os.path.isdir(path):
os.mkdir(path)
for name, df in self.table_collection.items():
name = name.replace(" ", "_") + ".csv"
filename = os.path.join(path, name)
df.to_csv(filename)
logging.info("Scenario saved as csv-collection to {0}".format(path))
def tables_to_excel(self, dpath=None, filename=None):
"""Dump scenario into an excel-file."""
if dpath is None:
bpath = os.path.join(os.path.expanduser("~"), ".q100opt")
if not os.path.isdir(bpath):
os.mkdir(bpath)
dpath = os.path.join(bpath, "dumps")
if not os.path.isdir(dpath):
os.mkdir(dpath)
if filename is None:
filename = "ds_dump.xlsx"
writer = pd.ExcelWriter(os.path.join(dpath, filename))
for name, df in sorted(self.table_collection.items()):
df.to_excel(writer, name)
writer.save()
logging.info("Scenario saved as excel file to {0}".format(filename))
def dump(self, path=None, filename=None):
"""Dump results of District scenario."""
if path is None:
bpath = os.path.join(os.path.expanduser("~"), ".q100opt")
if not os.path.isdir(bpath):
os.mkdir(bpath)
dpath = os.path.join(bpath, "dumps")
if not os.path.isdir(dpath):
os.mkdir(dpath)
path = os.path.join(dpath, "energysystem")
if not os.path.isdir(path):
os.mkdir(path)
if filename is None:
filename = "ds_dump.oemof"
if not os.path.isdir(path):
os.makedirs(path)
dump_des = deepcopy(self)
if dump_des.model is not None:
setattr(dump_des, 'model', None)
if dump_des.es is not None:
setattr(dump_des, 'es', None)
pickle.dump(
dump_des.__dict__, open(os.path.join(path, filename), "wb")
)
logging.info("DistrictScenario dumped"
" to {} as {}".format(path, filename))
def restore(self, path=None, filename=None):
"""Restores a district energy system from dump."""
self.__dict__ = load_district_scenario(path, filename).__dict__
logging.info("DistrictEnergySystem restored.")
def analyse_results(self, heat_bus_label='b_heat',
elec_bus_label='b_elec'):
"""Calls all analysis methods."""
for label in [heat_bus_label, elec_bus_label]:
check_label(self.results['main'], label)
self.analyse_costs()
self.analyse_emissions()
self.analyse_kpi()
self.analyse_sequences()
self.results['sum'] = self.results['sequences'].sum()
self.analyse_boundary_flows()
self.analyse_heat_generation_flows(heat_bus_label=heat_bus_label)
self.analyse_heat_bus(heat_bus_label=heat_bus_label)
self.analyse_electricity_bus(elec_bus_label=elec_bus_label)
def analyse_costs(self):
"""Performs a cost analysis."""
if 'cost_analysis' not in self.results.keys():
self.results['cost_analysis'] = analyse_costs(
results=self.results
)
logging.info("Economic analysis completed.")
# check if objective and recalculation match
total_costs = self.results['cost_analysis']['all']['costs'].sum()
objective_value = self.results['meta']['objective']
if abs(total_costs - objective_value) > 0.01:
raise ValueError(
"The objective value and the re-calculated costs do not match!"
)
else:
logging.info(
"Check passed: Objective value and recalculated costs match."
)
return self.results['cost_analysis']
def analyse_emissions(self):
"""Performs a summary of emissions of the energy system."""
if 'emission_analysis' not in self.results.keys():
self.results['emission_analysis'] = analyse_emissions(
results=self.results
)
logging.info("Emission analysis completed.")
# check if constraint and recalculation match
total_em = self.results[
'emission_analysis']['sum']['emissions'].sum()
emission_value = self.results['emissions']
if abs(total_em - emission_value) > 0.01:
raise ValueError(
"The constraint emission value and the re-calculated emissions"
" do not match!"
)
else:
logging.info(
"Check passed: Constraint emission value and recalculated"
" emission match."
)
return self.results['emission_analysis']
def analyse_kpi(self, label_end_energy=None):
"""Description."""
if label_end_energy is None:
label_end_energy = ['demand_heat']
if 'kpi' not in self.results.keys():
costs = self.results['meta']['objective']
emissions = self.results['emissions']
end_energy = sum([
solph.views.node(
self.results['main'], x)["sequences"].values.sum()
for x in label_end_energy])
kpi_dct = {
'absolute costs [€/a]': costs,
'absolute emission [kg/a]': emissions,
'end_energy [kWh/a]': end_energy,
'specific costs [€/kWh]': costs/end_energy,
'specific emission [kg/kWh]': emissions/end_energy,
}
kpi = pd.Series(kpi_dct)
self.results['kpi'] = kpi
else:
kpi = self.results['kpi']
return kpi
def analyse_sequences(self):
"""..."""
if 'sequences' not in self.results.keys():
self.results['sequences'] = \
get_all_sequences(self.results['main'])
ind_length = len(self.results['timeindex'])
df_param = self.results['table_collection']['Timeseries'].copy()
df_param = df_param.iloc[:ind_length]
list_of_tuples = [
('parameter', x.split('.')[0], x.split('.')[1])
for x in df_param.columns
]
df_param.columns = pd.MultiIndex.from_tuples(list_of_tuples)
df_param.index = self.results['timeindex']
self.results['sequences'] = pd.concat(
[self.results['sequences'], df_param], axis=1
)
logging.info("All sequences processed into one DataFrame.")
return self.results['sequences']
def analyse_boundary_flows(self):
"""
Returns the sequences and sums of all sinks and sources.
See postprocessing.get_boundary_flows!
"""
if 'boundary_flows' not in self.results.keys():
self.results['boundary_flows'] = \
get_boundary_flows(self.results['main'])
logging.info("Boundary flows analysis completed.")
return self.results['boundary_flows']
def analyse_heat_generation_flows(self, heat_bus_label='b_heat'):
"""Gets all heat generation flows."""
if 'heat_generation' not in self.results.keys():
self.results['heat_generation'] = \
get_trafo_flow(self.results['main'], label_bus=heat_bus_label)
logging.info("Heat generation flow analysis completed.")
return self.results['heat_generation']
def analyse_heat_bus(self, heat_bus_label='b_heat'):
"""..."""
if 'heat_bus' not in self.results.keys():
self.results['heat_bus'] = \
analyse_bus(self.results['main'], bus_label=heat_bus_label)
logging.info("Heat bus analysed.")
return self.results['heat_bus']
def analyse_electricity_bus(self, elec_bus_label='b_elec'):
"""..."""
if 'electricity_bus' not in self.results.keys():
self.results['electricity_bus'] = \
analyse_bus(self.results['main'], bus_label=elec_bus_label)
logging.info("Electricity bus analysed.")
return self.results['electricity_bus']
def load_district_scenario(path, filename):
"""Load a DistrictScenario class."""
des_restore = DistrictScenario()
des_restore.__dict__ = \
pickle.load(open(os.path.join(path, filename), "rb"))
return des_restore
def check_label(results, label):
"""..."""
pass
class ParetoFront(DistrictScenario):
"""Class for calculation pareto fronts with costs and emission."""
def __init__(self, emission_limits=None, number_of_points=2,
dist_type='linear',
off_set=1,
**kwargs):
super().__init__(**kwargs)
self.number = number_of_points
self.dist_type = dist_type
self.off_set = off_set
self.table_collection_co2opt = None
self.ds_min_co2 = None
self.ds_max_co2 = None
self.e_min = None
self.e_max = None
self.emission_limits = emission_limits
self.district_scenarios = dict()
self.pareto_front = None
# ToDo: sort results District Scenarios
# self.ordered_scenarios = [
# str(x) for x in sorted([int(x) for x in self.des.keys()],
# reverse=True)
# ]
def _get_min_emission(self, **kwargs):
"""Calculates the pareto point with minimum emission."""
sc_co2opt = DistrictScenario(
emission_limit=1000000000,
table_collection=self.table_collection_co2opt,
number_of_time_steps=self.number_of_time_steps,
year=self.year,
)
sc_co2opt.solve(**kwargs)
return sc_co2opt
def _get_max_emssion(self, **kwargs):
sc_costopt = DistrictScenario(
emission_limit=1000000000,
table_collection=self.table_collection,
number_of_time_steps=self.number_of_time_steps,
year=self.year,
)
sc_costopt.solve(**kwargs)
return sc_costopt
def _calc_emission_limits(self):
"""Calculates the emission limits of the pareto front."""
if self.dist_type == 'linear':
limits = []
e_start = self.e_min + self.off_set
interval = (self.e_max - e_start) / (self.number - 1)
for i in range(self.number):
limits.append(e_start + i * interval)
elif self.dist_type == 'logarithmic':
limits = []
e_start = self.e_min + self.off_set
lim_last = self.e_max
limits.append(lim_last)
for i in range(self.number-2):
lim_last = lim_last - (lim_last - e_start) * 0.5
limits.append(lim_last)
limits.append(e_start)
else:
            raise ValueError(
                'Unknown dist_type "{}" for calculating the emission limits.'
                ' Use "linear" or "logarithmic".'.format(self.dist_type)
            )
return limits
def _get_pareto_results(self):
"""Gets all cost an emission values of pareto front."""
index = list(self.district_scenarios.keys())
columns = ['costs', 'emissions']
df_pareto = pd.DataFrame(index=index, columns=columns)
for r, _ in df_pareto.iterrows():
df_pareto.at[r, 'costs'] = \
self.district_scenarios[r].results['costs']
df_pareto.at[r, 'emissions'] = \
self.district_scenarios[r].results['emissions']
return df_pareto
def calc_pareto_front(self, dump_esys=False, **kwargs):
"""
Calculates the Pareto front for a given number of points, or
for given emission limits.
First, the cost-optimal and emission optimal solutions are calculated.
Therefore, two optimisation runs are performed.
For the emission optimisation, the table_collection is prepared by
exchanging the `variable_cost` values and the `emission_factor` values.
"""
if self.table_collection is not None:
self.table_collection_co2opt = \
co2_optimisation(self.table_collection)
else:
            raise ValueError('Provide a table_collection!')
self.ds_min_co2 = self._get_min_emission(**kwargs)
        self.ds_max_co2 = self._get_max_emission(**kwargs)
self.e_min = self.ds_min_co2.results['meta']['objective']
self.e_max = self.ds_max_co2.results['emissions']
if self.emission_limits is None:
self.emission_limits = self._calc_emission_limits()
for e in self.emission_limits:
# Scenario name relative to emission range
e_rel = (e - self.e_min) / (self.e_max - self.e_min)
e_str = "{:.2f}".format(e_rel)
# e_str = str(int(round(e)))
ds_name = self.name + '_' + e_str
ds = DistrictScenario(
name=ds_name,
emission_limit=e,
table_collection=self.table_collection,
number_of_time_steps=self.number_of_time_steps,
year=self.year,
)
ds.solve(**kwargs)
self.district_scenarios.update(
{e_str: ds}
)
if dump_esys:
esys_path = os.path.join(self.results_fn, self.name,
"energy_system")
if not os.path.isdir(esys_path):
os.mkdir(esys_path)
ds.dump(path=esys_path, filename=e_str + '_dump.des')
self.results['pareto_front'] = self._get_pareto_results()
def store_results(self, path=None):
"""
        Store the main results and the input tables of the pareto front in
        plain, non-pickled formats (.xlsx / .csv).
"""
if path is None:
bpath = os.path.join(os.path.expanduser("~"), ".q100opt")
if not os.path.isdir(bpath):
os.mkdir(bpath)
dpath = os.path.join(bpath, "dumps")
if not os.path.isdir(dpath):
os.mkdir(dpath)
path = os.path.join(dpath, "pareto")
if not os.path.isdir(path):
os.mkdir(path)
# store table_collection
tables_path = os.path.join(path, "input_tables")
if not os.path.isdir(tables_path):
os.mkdir(tables_path)
for name, df in self.table_collection.items():
name = name.replace(" ", "_") + ".csv"
filename = os.path.join(tables_path, name)
df.to_csv(filename)
logging.info(
"Scenario saved as csv-collection to {0}".format(tables_path))
# store pareto results
path_pareto = os.path.join(path, 'pareto_results.xlsx')
self.results['pareto_front'].to_excel(path_pareto)
logging.info(
"Pareto front table saved as xlsx to {0}".format(path_pareto))
def dump(self, path=None, filename=None):
"""
Dumps the results of the pareto front instance.
        The oemof.solph.EnergySystems and oemof.solph.Models of the
        q100opt.DistrictScenarios are removed before dumping; only the
        results are dumped.
"""
# delete all oemof.solph.EnergySystems and oemof.solph.Models
for _, v in self.__dict__.items():
if hasattr(v, 'es') or hasattr(v, 'model'):
setattr(v, 'es', None)
setattr(v, 'model', None)
for _, des in self.district_scenarios.items():
setattr(des, 'es', None)
setattr(des, 'model', None)
pickle.dump(
self.__dict__, open(os.path.join(path, filename), "wb")
)
logging.info(
"ParetoFront dumped to {} as {}".format(path, filename)
)
def restore(self, path=None, filename=None):
"""Restores a district energy system from dump."""
self.__dict__ = load_pareto_front(path, filename).__dict__
logging.info("DistrictEnergySystem restored.")
def analyse_results(self, heat_bus_label='b_heat',
elec_bus_label='b_elec'):
"""
Performs the analyse_results method of the DistrictScenario class
for all scenarios of the pareto front.
"""
for _, des in self.district_scenarios.items():
des.analyse_results(heat_bus_label=heat_bus_label,
elec_bus_label=elec_bus_label)
self.results['kpi'] = self.analyse_kpi()
self.results['heat_generation'] = self.analyse_heat_generation_flows(
heat_bus_label=heat_bus_label
)
self.results['sequences'] = self.analyse_sequences()
self.results['sum'] = self.results['sequences'].sum().unstack(level=0)
self.results['costs'] = self.get_all_costs()
self.results['emissions'] = self.get_all_emissions()
self.results['scalars'] = self.get_all_scalars()
def analyse_kpi(self, label_end_energy=None):
"""
        Collects the KPI tables of all DistrictScenarios of the pareto front
        into one DataFrame.
"""
if label_end_energy is None:
label_end_energy = ['demand_heat']
d_kpi = {}
for e_key, des in self.district_scenarios.items():
d_kpi.update(
{e_key: des.analyse_kpi(label_end_energy=label_end_energy)}
)
df_kpi = pd.concat(d_kpi, axis=1)
return df_kpi
def get_all_costs(self):
"""
        Puts all cost analyses of the individual DistrictScenarios into
        one multi-index DataFrame.
"""
d_costs = {}
for e_key, des in self.district_scenarios.items():
d_costs.update(
{e_key: des.results["cost_analysis"]["all"].stack()}
)
df_costs = pd.concat(d_costs, names=['emission_limit'])
return df_costs.unstack(level=0).T
def get_all_emissions(self):
"""
Puts all emissions analyses of the individual DistrictScenarios into
one Multi-index DataFrame.
"""
d_emissions = {}
for e_key, des in self.district_scenarios.items():
d_emissions.update(
{e_key: pd.concat(
{'emission': des.results["emission_analysis"]["sum"]}
).stack()}
)
        df_emissions = pd.concat(d_emissions, names=['emission_limit'])

        # same layout as get_all_costs: one column per emission limit
        return df_emissions.unstack(level=0).T
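

# Minimal workflow sketch for ParetoFront. It assumes `tables` is an already
# prepared table_collection (dict of pandas.DataFrames) and that
# DistrictScenario.solve accepts a `solver` keyword; all paths are placeholders.
# For reference, _calc_emission_limits with e_min=100, e_max=500, off_set=1 and
# number=5 yields the linear limits [101, 200.75, 300.5, 400.25, 500].
def _example_pareto_workflow(tables):
    pf = ParetoFront(
        name="example_pareto",
        table_collection=tables,
        number_of_time_steps=8760,   # assumed: one year in hourly resolution
        year=2020,
        number_of_points=5,
        dist_type='linear',
    )
    pf.calc_pareto_front(solver='cbc')   # kwargs are forwarded to DistrictScenario.solve
    pf.analyse_results(heat_bus_label='b_heat', elec_bus_label='b_elec')
    pf.store_results(path="/tmp/q100opt_pareto")
    pf.dump(path="/tmp/q100opt_pareto", filename="pareto.dump")
    return pf.results['pareto_front']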
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
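
# Small standalone sketch of the behaviour exercised by the tests below
# (construction from breaks, the `closed` side, scalar lookup); the leading
# underscore keeps it out of pytest collection.
def _intervalindex_quick_demo():
    idx = IntervalIndex.from_breaks(range(5), closed='right')   # (0, 1], (1, 2], ...
    assert idx.get_loc(0.5) == 0                 # 0.5 falls into (0, 1]
    assert Interval(0, 1) in idx                 # membership checks Interval elements
    assert list(idx.get_indexer([0.5, 1.5, 10])) == [0, 1, -1]
    return idx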
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
        i = IntervalIndex.from_tuples([(Timestamp('20130101'),
                                        Timestamp('20130102')),
                                       (Timestamp('20130102'),
                                        Timestamp('20130103'))],
                                      closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks([0, 1, 2], closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
        self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
        self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
        expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlaps completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.union(index), index)
tm.assert_index_equal(index.union(index[:1]), index)
def test_intersection(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
result = other.intersection(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.intersection(index), index)
def test_difference(self, closed):
index = self.create_index(closed=closed)
tm.assert_index_equal(index.difference(index[:1]), index[1:])
def test_symmetric_difference(self, closed):
idx = self.create_index(closed=closed)
result = idx[1:].symmetric_difference(idx[:-1])
expected = IntervalIndex([idx[0], idx[-1]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
def test_set_operation_errors(self, closed, op_name):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# test errors
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
with tm.assert_raises_regex(ValueError, msg):
set_op(Index([1, 2, 3]))
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
set_op(other)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
def test_datetime(self):
dates = date_range('2000', periods=3)
idx = IntervalIndex.from_breaks(dates)
tm.assert_index_equal(idx.left, dates[:2])
tm.assert_index_equal(idx.right, dates[-2:])
expected = date_range('2000-01-01T12:00', periods=2)
tm.assert_index_equal(idx.mid, expected)
        assert Timestamp('2000-01-01T12') not in idx
target = date_range('1999-12-31T12:00', periods=7, freq='12H')
actual = idx.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
        idx = IntervalIndex.from_tuples(tpls, closed=closed)
        assert idx.is_non_overlapping_monotonic is False
import sys
import numpy as np
import pandas as pd
from collections import Counter, defaultdict
import csv
import math
import operator
import os
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score
import time
import plotly as py
import plotly.graph_objs as go
abs_path = os.path.abspath(__file__)
file_dir = os.path.dirname(abs_path)
sys.path.append(file_dir)
from feat_eng import *
from modeling import *
from mlp_bayes_opt_legit import *
from create_pred_set import *
os.chdir("") # Insert path to dunhumby data sets
def group_basket_stats(product_list, df_transactions, df_demographic):
print("Grouping Baskets...")
df_grouped_basket = get_grouped_basket(df_transactions)
print("getting product counts for each basket")
df_grouped_basket_count = get_grouped_basket_count(df_grouped_basket)
print("getting summed quantities for each basket id...")
df_grouped_basket_sum = get_grouped_basket_sum(df_grouped_basket)
print("Applying label...")
    df_grouped_basket = apply_label_grouped_basket(df_grouped_basket, product_list)
print("merging count, sum and labels...")
df_grouped_basket_merge = merging_sum_count_labels(df_grouped_basket, df_grouped_basket_count, df_grouped_basket_sum)
print("merging with demmographic data....")
df_grouped_basket_merge = df_grouped_basket_merge.merge(df_demographic, on="household_key", how="left").reset_index(drop=True)
print("First ten rows of the dataset...")
print(df_grouped_basket_merge.head(10)) # Sanity check
return df_grouped_basket_merge
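
# Sketch of a possible call chain: group_basket_stats expects transactions that
# already carry the campaign columns START_DAY / END_DAY (see the drop list in
# get_grouped_basket_sum), i.e. the output of get_transactions_for_hh defined below.
def _example_basket_pipeline(df_transactions, hh_start_dates, df_demographic, product_list):
    pre_campaign_trans = get_transactions_for_hh(df_transactions, hh_start_dates)
    return group_basket_stats(product_list, pre_campaign_trans, df_demographic)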
def get_grouped_basket(df_transactions):
return df_transactions.groupby(['household_key', 'BASKET_ID', 'DAY'])
def get_grouped_basket_count(df_grouped_basket):
df_grouped_basket_count = df_grouped_basket.size().reset_index()
df_grouped_basket_count = df_grouped_basket_count.rename(columns={0: 'PROD_PURCHASE_COUNT'})
return df_grouped_basket_count
def apply_label_grouped_basket(df_grouped_basket, product_list):
    df_grouped_basket = df_grouped_basket.apply(
        lambda x: 1 if len(set(x.PRODUCT_ID.tolist()) & set(product_list)) > 0 else 0
    ).reset_index().rename(columns={0: "label"})
    return df_grouped_basket
def get_grouped_basket_sum(df_grouped_basket):
df_grouped_basket_sum = df_grouped_basket.sum().reset_index()
df_grouped_basket_sum.drop(['RETAIL_DISC', 'TRANS_TIME', 'COUPON_MATCH_DISC', 'START_DAY', 'END_DAY'], axis=1, inplace=True)
return df_grouped_basket_sum
def merging_sum_count_labels(df_grouped_basket, df_grouped_basket_count, df_grouped_basket_sum):
df_grouped_basket_merge = df_grouped_basket_sum.merge(df_grouped_basket, on=["household_key", "BASKET_ID"]).reset_index(drop=True)
del df_grouped_basket
del df_grouped_basket_sum
df_grouped_basket_merge = df_grouped_basket_merge.merge(df_grouped_basket_count, on=["household_key", "BASKET_ID"]).reset_index(drop=True)
del df_grouped_basket_count
df_grouped_basket_merge = df_grouped_basket_merge.drop(['DAY_x', 'DAY_y'], axis=1)
return df_grouped_basket_merge
def get_products_for_coupon(coupon_Id, df_coupon):
subset = df_coupon[df_coupon['COUPON_UPC'] == coupon_Id]
return subset['PRODUCT_ID'].unique()
def get_campaigns_for_coupon(coupon_Id, df_coupon):
subset = df_coupon[df_coupon['COUPON_UPC'] == coupon_Id]
return subset['CAMPAIGN'].unique()
def get_households_for_campaigns(campaigns, df_campaign_table, df_campaign_desc):
#get subset from campaign table to get the households for the campaign
subset = df_campaign_table[df_campaign_table['CAMPAIGN'].isin(campaigns)]
hh_start_dates = subset.merge(df_campaign_desc, on='CAMPAIGN', how='left')
hh_start_dates = hh_start_dates.sort_values(['household_key', 'START_DAY'])
return hh_start_dates.drop_duplicates(['household_key'], keep="first")
def get_transactions_for_hh(df_transactions, hh_start_dates):
trans_merge = df_transactions.merge(hh_start_dates, on='household_key', how='left')
trans_merge['START_DAY'].fillna(10000, inplace=True)
return trans_merge[trans_merge['DAY'].astype(float) < trans_merge['START_DAY']]
def get_transactions_for_hh_within(df_transactions, hh_start_dates, product_list):
trans_merge = df_transactions.merge(hh_start_dates, on='household_key', how='left')
trans_merge['START_DAY'].fillna(10000, inplace=True)
trans_merge['END_DAY'].fillna(0, inplace=True)
    trans_filtered = trans_merge[(trans_merge['DAY'].astype(float) >= trans_merge['START_DAY']) &
                                 (trans_merge['DAY'].astype(float) <= trans_merge['END_DAY'])].copy()
    trans_filtered['label'] = trans_filtered.apply(
        lambda row: 1 if row['PRODUCT_ID'] in product_list else 0, axis=1)
    trans_filtered = trans_filtered[trans_filtered['label'] == 1]
return trans_filtered[['household_key', 'PRODUCT_ID', 'CAMPAIGN']], list(trans_filtered['household_key'].unique())
if __name__ == "__main__":
# coupon_id_list = ["10000089073", "57940011075", "10000089061", "51800000050"]
coupon_Id = "51800000050"
print("Coupon ID: " + coupon_Id)
print("Reading coupon data...")
df_coupon = pd.read_csv('coupon.csv', dtype={'COUPON_UPC': str, 'CAMPAIGN': str, 'PRODUCT_ID': str})
campaigns = get_campaigns_for_coupon(coupon_Id, df_coupon)
print("Campaigns associated with the coupon: " + str(len(campaigns)))
product_list = get_products_for_coupon(coupon_Id, df_coupon)
del df_coupon
print("Products associated with the coupon: "+ str(len(product_list)))
print("Reading in campaign_table and campaign_desc...")
    df_campaign_table = pd.read_csv('campaign_table.csv', dtype={'household_key': str, 'CAMPAIGN': str})
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.automl import get_default_primary_search_objective
from evalml.data_checks import (
DataCheckAction,
DataCheckActionCode,
DataCheckError,
DataCheckMessageCode,
DataChecks,
DataCheckWarning,
InvalidTargetDataCheck,
)
from evalml.exceptions import DataCheckInitError
from evalml.objectives import (
MAPE,
MeanSquaredLogError,
RootMeanSquaredLogError,
)
from evalml.problem_types import (
ProblemTypes,
is_binary,
is_multiclass,
is_regression,
)
from evalml.utils.woodwork_utils import numeric_and_boolean_ww
invalid_targets_data_check_name = InvalidTargetDataCheck.name
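

# Minimal sketch of using the data check directly (mirrors the pattern in the tests
# below): instantiate it for a problem type with its default objective and inspect
# the returned dictionary of warnings/errors/actions.
def _example_direct_validation():
    check = InvalidTargetDataCheck(
        "binary", get_default_primary_search_objective("binary")
    )
    X = pd.DataFrame({"col": range(4)})
    y = pd.Series([0, 1, 0, 1])
    report = check.validate(X, y)
    return report["errors"]   # empty for a clean binary target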
def test_invalid_target_data_check_invalid_n_unique():
with pytest.raises(
ValueError, match="`n_unique` must be a non-negative integer value."
):
InvalidTargetDataCheck(
"regression",
get_default_primary_search_objective("regression"),
n_unique=-1,
)
def test_invalid_target_data_check_nan_error():
X = pd.DataFrame({"col": [1, 2, 3]})
invalid_targets_check = InvalidTargetDataCheck(
"regression", get_default_primary_search_objective("regression")
)
assert invalid_targets_check.validate(X, y=pd.Series([1, 2, 3])) == {
"warnings": [],
"errors": [],
"actions": [],
}
assert invalid_targets_check.validate(X, y=pd.Series([np.nan, np.nan, np.nan])) == {
"warnings": [],
"errors": [
DataCheckError(
message="Target is either empty or fully null.",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_IS_EMPTY_OR_FULLY_NULL,
details={},
).to_dict(),
],
"actions": [],
}
def test_invalid_target_data_check_numeric_binary_classification_valid_float():
y = pd.Series([0.0, 1.0, 0.0, 1.0])
X = pd.DataFrame({"col": range(len(y))})
invalid_targets_check = InvalidTargetDataCheck(
"binary", get_default_primary_search_objective("binary")
)
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [],
"actions": [],
}
def test_invalid_target_data_check_multiclass_two_examples_per_class():
y = pd.Series([0] + [1] * 19 + [2] * 80)
X = pd.DataFrame({"col": range(len(y))})
invalid_targets_check = InvalidTargetDataCheck(
"multiclass", get_default_primary_search_objective("binary")
)
expected_message = "Target does not have at least two instances per class which is required for multiclass classification"
# with 1 class not having min 2 instances
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [
DataCheckError(
message=expected_message,
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_MULTICLASS_NOT_TWO_EXAMPLES_PER_CLASS,
details={"least_populated_class_labels": [0]},
).to_dict()
],
"actions": [],
}
y = pd.Series([0] + [1] + [2] * 98)
X = pd.DataFrame({"col": range(len(y))})
# with 2 classes not having min 2 instances
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [
DataCheckError(
message=expected_message,
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_MULTICLASS_NOT_TWO_EXAMPLES_PER_CLASS,
details={"least_populated_class_labels": [0, 1]},
).to_dict()
],
"actions": [],
}
@pytest.mark.parametrize(
"pd_type", ["int16", "int32", "int64", "float16", "float32", "float64", "bool"]
)
def test_invalid_target_data_check_invalid_pandas_data_types_error(pd_type):
y = pd.Series([0, 1, 0, 0, 1, 0, 1, 0])
y = y.astype(pd_type)
X = pd.DataFrame({"col": range(len(y))})
invalid_targets_check = InvalidTargetDataCheck(
"binary", get_default_primary_search_objective("binary")
)
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [],
"actions": [],
}
y = pd.Series(pd.date_range("2000-02-03", periods=5, freq="W"))
X = pd.DataFrame({"col": range(len(y))})
unique_values = y.value_counts().index.tolist()
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [
DataCheckError(
message="Target is unsupported {} type. Valid Woodwork logical types include: {}".format(
"Datetime",
", ".join([ltype for ltype in numeric_and_boolean_ww]),
),
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_UNSUPPORTED_TYPE,
details={"unsupported_type": "datetime"},
).to_dict(),
DataCheckError(
message="Binary class targets require exactly two unique values.",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES,
details={"target_values": unique_values},
).to_dict(),
],
"actions": [],
}
def test_invalid_target_y_none():
invalid_targets_check = InvalidTargetDataCheck(
"binary", get_default_primary_search_objective("binary")
)
assert invalid_targets_check.validate(pd.DataFrame(), y=None) == {
"warnings": [],
"errors": [
DataCheckError(
message="Target is None",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_IS_NONE,
details={},
).to_dict()
],
"actions": [],
}
def test_invalid_target_data_input_formats():
invalid_targets_check = InvalidTargetDataCheck(
"binary", get_default_primary_search_objective("binary")
)
# test empty pd.Series
X = pd.DataFrame()
messages = invalid_targets_check.validate(X, pd.Series())
assert messages == {
"warnings": [],
"errors": [
DataCheckError(
message="Target is either empty or fully null.",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_IS_EMPTY_OR_FULLY_NULL,
details={},
).to_dict()
],
"actions": [],
}
expected = {
"warnings": [],
"errors": [
DataCheckError(
message="3 row(s) (75.0%) of target values are null",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_HAS_NULL,
details={"num_null_rows": 3, "pct_null_rows": 75},
).to_dict(),
DataCheckError(
message="Binary class targets require exactly two unique values.",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES,
details={"target_values": [0]},
).to_dict(),
],
"actions": [
DataCheckAction(
DataCheckActionCode.IMPUTE_COL,
data_check_name=invalid_targets_data_check_name,
metadata={
"is_target": True,
"impute_strategy": "most_frequent",
},
).to_dict()
],
}
# test Woodwork
y = pd.Series([None, None, None, 0])
X = pd.DataFrame({"col": range(len(y))})
messages = invalid_targets_check.validate(X, y)
assert messages == expected
# test list
y = [np.nan, np.nan, np.nan, 0]
X = pd.DataFrame({"col": range(len(y))})
messages = invalid_targets_check.validate(X, y)
assert messages == expected
# test np.array
y = np.array([np.nan, np.nan, np.nan, 0])
X = pd.DataFrame({"col": range(len(y))})
messages = invalid_targets_check.validate(X, y)
assert messages == expected
@pytest.mark.parametrize(
"problem_type", [ProblemTypes.BINARY, ProblemTypes.TIME_SERIES_BINARY]
)
def test_invalid_target_data_check_n_unique(problem_type):
y = pd.Series(list(range(100, 200)) + list(range(200)))
unique_values = y.value_counts().index.tolist()[:100] # n_unique defaults to 100
X = pd.DataFrame({"col": range(len(y))})
invalid_targets_check = InvalidTargetDataCheck(
problem_type, get_default_primary_search_objective(problem_type)
)
# Test default value of n_unique
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [
DataCheckError(
message="Binary class targets require exactly two unique values.",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES,
details={"target_values": unique_values},
).to_dict()
],
"actions": [],
}
# Test number of unique values < n_unique
y = pd.Series(range(20))
X = pd.DataFrame({"col": range(len(y))})
unique_values = y.value_counts().index.tolist()
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [
DataCheckError(
message="Binary class targets require exactly two unique values.",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES,
details={"target_values": unique_values},
).to_dict()
],
"actions": [],
}
# Test n_unique is None
invalid_targets_check = InvalidTargetDataCheck(
"binary", get_default_primary_search_objective("binary"), n_unique=None
)
y = pd.Series(range(150))
X = pd.DataFrame({"col": range(len(y))})
unique_values = y.value_counts().index.tolist()
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [
DataCheckError(
message="Binary class targets require exactly two unique values.",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES,
details={"target_values": unique_values},
).to_dict()
],
"actions": [],
}
@pytest.mark.parametrize(
"objective",
[
"Root Mean Squared Log Error",
"Mean Squared Log Error",
"Mean Absolute Percentage Error",
],
)
def test_invalid_target_data_check_invalid_labels_for_nonnegative_objective_names(
objective,
):
X = pd.DataFrame({"column_one": [100, 200, 100, 200, 200, 100, 200, 100] * 25})
y = pd.Series([2, 2, 3, 3, -1, -1, 1, 1] * 25)
data_checks = DataChecks(
[InvalidTargetDataCheck],
{
"InvalidTargetDataCheck": {
"problem_type": "multiclass",
"objective": objective,
}
},
)
assert data_checks.validate(X, y) == {
"warnings": [],
"errors": [
DataCheckError(
message=f"Target has non-positive values which is not supported for {objective}",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_INCOMPATIBLE_OBJECTIVE,
details={
"Count of offending values": sum(
val <= 0 for val in y.values.flatten()
)
},
).to_dict()
],
"actions": [],
}
X = pd.DataFrame({"column_one": [100, 200, 100, 200, 100]})
y = pd.Series([2, 3, 0, 1, 1])
invalid_targets_check = InvalidTargetDataCheck(
problem_type="regression", objective=objective
)
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [
DataCheckError(
message=f"Target has non-positive values which is not supported for {objective}",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_INCOMPATIBLE_OBJECTIVE,
details={
"Count of offending values": sum(
val <= 0 for val in y.values.flatten()
)
},
).to_dict()
],
"actions": [],
}
@pytest.mark.parametrize(
"objective", [RootMeanSquaredLogError(), MeanSquaredLogError(), MAPE()]
)
def test_invalid_target_data_check_invalid_labels_for_nonnegative_objective_instances(
objective,
):
X = pd.DataFrame({"column_one": [100, 200, 100, 200, 200, 100, 200, 100] * 25})
y = pd.Series([2, 2, 3, 3, -1, -1, 1, 1] * 25)
data_checks = DataChecks(
[InvalidTargetDataCheck],
{
"InvalidTargetDataCheck": {
"problem_type": "multiclass",
"objective": objective,
}
},
)
assert data_checks.validate(X, y) == {
"warnings": [],
"errors": [
DataCheckError(
message=f"Target has non-positive values which is not supported for {objective.name}",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_INCOMPATIBLE_OBJECTIVE,
details={
"Count of offending values": sum(
val <= 0 for val in y.values.flatten()
)
},
).to_dict()
],
"actions": [],
}
def test_invalid_target_data_check_invalid_labels_for_objectives(
time_series_core_objectives,
):
X = pd.DataFrame({"column_one": [100, 200, 100, 200, 200, 100, 200, 100] * 25})
y = pd.Series([2, 2, 3, 3, -1, -1, 1, 1] * 25)
for objective in time_series_core_objectives:
if not objective.positive_only:
data_checks = DataChecks(
[InvalidTargetDataCheck],
{
"InvalidTargetDataCheck": {
"problem_type": "multiclass",
"objective": objective,
}
},
)
assert data_checks.validate(X, y) == {
"warnings": [],
"errors": [],
"actions": [],
}
X = pd.DataFrame({"column_one": [100, 200, 100, 200, 100]})
y = pd.Series([2, 3, 0, 1, 1])
for objective in time_series_core_objectives:
if not objective.positive_only:
invalid_targets_check = InvalidTargetDataCheck(
problem_type="regression", objective=objective
)
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [],
"actions": [],
}
@pytest.mark.parametrize(
"objective",
[
"Root Mean Squared Log Error",
"Mean Squared Log Error",
"Mean Absolute Percentage Error",
],
)
def test_invalid_target_data_check_valid_labels_for_nonnegative_objectives(objective):
X = pd.DataFrame({"column_one": [100, 100, 200, 300, 100, 200, 100] * 25})
y = pd.Series([2, 2, 3, 3, 1, 1, 1] * 25)
data_checks = DataChecks(
[InvalidTargetDataCheck],
{
"InvalidTargetDataCheck": {
"problem_type": "multiclass",
"objective": objective,
}
},
)
assert data_checks.validate(X, y) == {"warnings": [], "errors": [], "actions": []}
def test_invalid_target_data_check_initialize_with_none_objective():
with pytest.raises(DataCheckInitError, match="Encountered the following error"):
DataChecks(
[InvalidTargetDataCheck],
{
"InvalidTargetDataCheck": {
"problem_type": "multiclass",
"objective": None,
}
},
)
def test_invalid_target_data_check_regression_problem_nonnumeric_data():
y_categorical = pd.Series(["Peace", "Is", "A", "Lie"] * 100)
y_mixed_cat_numeric = pd.Series(["Peace", 2, "A", 4] * 100)
y_integer = | pd.Series([1, 2, 3, 4]) | pandas.Series |
from processing_functions import misc as msc
from processing_functions import general_funcs as gf
import os
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
import pickle
from processing_functions import validation as vd
######
from PIL import Image
def add_img_outline(imgpath,print_dimensions=True):
# many thx: https://stackoverflow.com/a/11143078/4436950
image = Image.open(imgpath)
w,h = image.size
new_w = w + int(float(w)/250)
new_h = h + int(float(h)/250)
if print_dimensions:
print(w,h,new_w,new_h)
new_size = (new_w,new_h)
new_image = Image.new("RGB",new_size)
new_image.paste(image,((new_size[0]-image.size[0])//2,(new_size[1]-image.size[1])//2))
new_image.save(imgpath.replace('.png','_outline.png'))
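# Hedged illustration (defined but not called; the path below is made up): add_img_outline
# writes a copy of the PNG with a thin black border next to the original as *_outline.png.
def _example_add_img_outline():
    add_img_outline('figures/example_plot.png', print_dimensions=False)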
#######
# naming for the local folder:
outlocalfolder = "phase3_reloaded"
pickles_list = msc.orderedFileList(vd.PICKLES_PATH,'*.pickle')
t1 = time.time()
#print(pickles_list)
dataDict = {}
for picklepath in pickles_list:
fname = msc.filenameFromPathWtithoutExt(picklepath)
#print(fname)
with open(picklepath,'rb') as picklefile:
dataDict[fname] = pickle.load(picklefile)
#print(dataDict)
ckpts_dict = dict(dataDict['ckpt_rev_dict'])
del dataDict['ckpt_rev_dict']
ckpts_dict_2 = {}
for key in ckpts_dict:
ckpts_dict_2[key] = msc.get_only_parent_dir(ckpts_dict[key])
#print(ckpts_dict_2)
#print(dataDict)
for key in dataDict:
pass
#print()
idxmin_cityscapes = []
# first of all: defining the epochs that will really be used:
for key in dataDict:
dataDict[key] = dataDict[key].rename(index=ckpts_dict_2).sort_index().T
if key == "iou_with_terrain_veg":
mins = dataDict[key].mean().nsmallest(3)
idxmin_cityscapes = list(mins.index)
lastidx = "0"+str(int(idxmin_cityscapes[-1]))
idx_list = list(dataDict[key].columns)
#print(idx_list)
lastidx = idx_list[idx_list.index(lastidx)-1]
#print(lastidx)
elif key == "iou_new_validation":
mins = dataDict[key].mean().nsmallest(1)
idxmin_own= list(mins.index)
#print(idxmin_cityscapes,idxmin_own,lastidx)
# subdividing the dataframes:
for key in dataDict:
#print(dataDict[key])
if "only_trees" in key or "with_terrain_veg" in key:
dataDict[key].drop(columns=idxmin_cityscapes,inplace=True)
idx_list = list(dataDict[key].columns)
#print(idx_list)
split_index = idx_list.index(lastidx)
dataDict[key] = dataDict[key].iloc[:,:split_index]
curr_cols = list(dataDict[key].columns)
curr_cols = list(map(int,curr_cols))
curr_cols = [val+100 for val in curr_cols]
curr_cols = [str(val).zfill(4) for val in curr_cols]
dataDict[key].columns = curr_cols
elif "new_validation" in key:
idx_list = list(dataDict[key].columns)
split_index = idx_list.index(idxmin_own[0])
dataDict[key] = dataDict[key].iloc[:,split_index:]
curr_cols = list(dataDict[key].columns)
curr_cols = list(map(int,curr_cols))
curr_cols = [val-min(curr_cols) for val in curr_cols]
curr_cols = [str(val).zfill(4) for val in curr_cols]
dataDict[key].columns = curr_cols
#print(curr_cols)
current_metric = "mean_epochs_stats"
c_dpi = 900
#print("took",time.time()-t1,"seconds")
# dict for best epochs summary
summary_df_dict = {}
# list of rows with best error metrics
best_epochs_idx = []
for i,key in enumerate(dataDict):
print(key)
newdf = dataDict[key]
print(newdf.head())
mean = newdf.mean()
std = newdf.std()
stderrmean = newdf.sem()
dfmin = newdf.min()
dfmax = newdf.max()
plstd = mean + std
mnstd = mean - std
statsdf = pd.concat([dfmax,plstd,mean,mnstd,dfmin],axis=1)
# for pt-br:
# statsdf.columns = ['máx.','méd.+d.p.','média','méd.-d.p','mín.']
# for en:
statsdf.columns = ['max.','mean+s.d.','mean','mean-s.d.','min.']
#print(statsdf)
#print(std)
# #print(key,key.split("_")[0])
outdir = os.path.join(vd.FIGURES_PATH2,outlocalfolder,current_metric)
msc.create_dir_ifnot_exists(outdir)
figname = os.path.join(outdir,key+"_"+current_metric+".png")
figname_t = os.path.join(outdir,key+"_"+current_metric+"_T.png")
statsdf.plot(style=['-g',':c','-b',':c','-r'],markersize = 2)
# for pt-br
# plt.ylabel('Valor (%)')
# plt.xlabel('Número da Época')
# for en
plt.ylabel('Value')
plt.xlabel('Epoch')
plt.savefig(figname,dpi=c_dpi)
add_img_outline(figname)
plt.close('all')
#print('\n',newdf.head())
bestepochidx = statsdf['mean'].idxmax()
#print(statsdf.T[bestepochidx])
best_epochs_idx.append(bestepochidx)
median = newdf.median()
kurt = newdf.kurtosis()
skewn = newdf.skew()
statsdf2 = | pd.concat([dfmax,median,dfmin,mean,std,stderrmean],axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
__author__ = 'fpajot'
import io
import logging
import pickle
from unittest import TestCase
import boto3
from botocore.exceptions import ClientError
from moto import mock_s3
import pandas
import numpy
from pandas_aws.s3 import get_keys, put_df, get_df, get_df_from_keys
MY_BUCKET = "mymockbucket"
MY_PREFIX = "mockfolder"
AWS_REGION_NAME = 'eu-west-1'
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
@mock_s3
class BaseAWSTest(TestCase):
"""Base class for test cases using moto"""
def setUp(self):
self.data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
self.client = boto3.client("s3", region_name=AWS_REGION_NAME)
s3 = boto3.resource("s3", region_name=AWS_REGION_NAME)
if MY_BUCKET in [b for b in s3.buckets.all()]:
err = "{bucket} should not exist.".format(bucket=MY_BUCKET)
logger.error([b['Name'] for b in self.client.list_buckets()['Buckets']])
raise EnvironmentError(err)
else:
self.client.create_bucket(Bucket=MY_BUCKET, CreateBucketConfiguration={
'LocationConstraint': AWS_REGION_NAME})
logger.debug("Existing buckets:")
logger.debug(self.client.list_buckets()['Buckets'])
def tearDown(self):
s3 = boto3.resource("s3", region_name=AWS_REGION_NAME)
bucket = s3.Bucket(MY_BUCKET)
for key in bucket.objects.all():
key.delete()
bucket.delete()
class GetKeysTests(BaseAWSTest):
"""Test s3.get_keys"""
def setUp(self):
super(GetKeysTests, self).setUp()
for o in range(2):
self.client.put_object(Bucket=MY_BUCKET, Key=MY_PREFIX + '/key' + str(o), Body=str(o))
def tearDown(self):
super(GetKeysTests, self).tearDown()
def test_get_s3_keys_success_one_key(self):
key = next(get_keys(self.client, MY_BUCKET, MaxKeys=1))
self.assertEqual(key, MY_PREFIX + '/key0')
def test_get_s3_keys_failure_one_key(self):
with self.assertRaises(StopIteration):
_ = next(get_keys(self.client, MY_BUCKET, prefix='foo'))
def test_get_s3_keys_success_multi_pages(self):
keys = get_keys(self.client, MY_BUCKET, MaxKeys=1)
self.assertEqual(next(keys), MY_PREFIX + '/key0')
self.assertEqual(next(keys), MY_PREFIX + '/key1')
def test_get_s3_keys_failure_multi_pages(self):
keys = get_keys(self.client, MY_BUCKET)
_ = next(keys)
_ = next(keys)
with self.assertRaises(StopIteration):
_ = next(keys)
def test_get_s3_keys_success_one_key_with_prefix(self):
self.client.put_object(Bucket=MY_BUCKET, Key='key3', Body='awesome body')
keys = list(get_keys(self.client, MY_BUCKET, prefix=MY_PREFIX))
self.assertEqual(keys, [MY_PREFIX + '/key0', MY_PREFIX + '/key1'])
def test_get_s3_keys_success_one_key_with_suffix(self):
self.client.put_object(Bucket=MY_BUCKET, Key='key3.txt', Body='awesome body')
key = next(get_keys(self.client, MY_BUCKET, suffix='.txt'))
self.assertEqual(key, 'key3.txt')
class PutDFTests(BaseAWSTest):
"""Test for s3.put_df"""
def setUp(self):
super(PutDFTests, self).setUp()
def tearDown(self):
super(PutDFTests, self).tearDown()
def test_put_df_failure_unknown_type(self):
o = 'awesome body'
key = MY_PREFIX + '/key1'
with self.assertRaises(TypeError):
put_df(self.client, o, MY_BUCKET, key)
def test_put_df_success_dataframe_to_pickle(self):
o = pandas.DataFrame.from_dict(self.data)
key = MY_PREFIX + '/key1.pickle'
put_df(self.client, o, MY_BUCKET, key, format='pickle')
body = pickle.loads(self.client.get_object(Bucket=MY_BUCKET, Key=key)['Body'].read())
self.assertSequenceEqual(list(o.columns), list(body.columns))
self.assertSequenceEqual(o.iloc[0].tolist(), body.iloc[0].tolist())
def test_put_df_success_dataframe_to_csv(self):
o = pandas.DataFrame.from_dict(self.data)
key = MY_PREFIX + '/<KEY>'
put_df(self.client, o, MY_BUCKET, key, format='csv', index=False)
body = pandas.read_csv(self.client.get_object(Bucket=MY_BUCKET, Key=key)['Body'])
self.assertSequenceEqual(list(o.columns), list(body.columns))
self.assertSequenceEqual(o.iloc[0].tolist(), body.iloc[0].tolist())
def test_put_df_success_dataframe_to_parquet(self):
o = pandas.DataFrame.from_dict(self.data)
key = MY_PREFIX + '/key1.parquet'
put_df(self.client, o, MY_BUCKET, key, format='parquet')
body = pandas.read_parquet(io.BytesIO(self.client.get_object(Bucket=MY_BUCKET, Key=key)['Body'].read()))
self.assertSequenceEqual(list(o.columns), list(body.columns))
self.assertSequenceEqual(o.iloc[0].tolist(), body.iloc[0].tolist())
def test_put_df_success_dataframe_to_excel(self):
o = pandas.DataFrame.from_dict(self.data)
key = MY_PREFIX + '/<KEY>'
put_df(self.client, o, MY_BUCKET, key, format='xlsx')
body = pandas.read_excel(io.BytesIO(self.client.get_object(Bucket=MY_BUCKET, Key=key)['Body'].read()))
self.assertSequenceEqual(list(o.columns), list(body.columns))
self.assertSequenceEqual(o.iloc[0].tolist(), body.iloc[0].tolist())
def test_put_df_failure_dataframe_to_unknown_format(self):
o = pandas.DataFrame.from_dict(self.data)
key = MY_PREFIX + '/key1.txt'
with self.assertRaises(AssertionError):
put_df(self.client, o, MY_BUCKET, key, format='txt')
def test_put_df_success_dataframe_to_csv_with_kwargs(self):
o = pandas.DataFrame.from_dict(self.data)
key = MY_PREFIX + '/<KEY>'
put_df(self.client, o, MY_BUCKET, key, format='csv', sep=';')
body = pandas.read_csv(self.client.get_object(Bucket=MY_BUCKET, Key=key)['Body'], sep=';')
self.assertSequenceEqual(list(o.columns), list(body.columns))
self.assertSequenceEqual(o.iloc[0].tolist(), body.iloc[0].tolist())
def test_put_df_success_dataframe_to_csv_with_compression(self):
o = pandas.DataFrame.from_dict(self.data)
key = MY_PREFIX + '/<KEY>'
put_df(self.client, o, MY_BUCKET, key, compression='gzip')
body = pandas.read_csv(self.client.get_object(Bucket=MY_BUCKET, Key=key)['Body'], compression='gzip')
self.assertSequenceEqual(list(o.columns), list(body.columns))
self.assertSequenceEqual(o.iloc[0].tolist(), body.iloc[0].tolist())
def test_put_df_success_dataframe_to_multiple_csv(self):
o = | pandas.DataFrame.from_dict(self.data) | pandas.DataFrame.from_dict |
from typing import List
from datetime import datetime
from pandas import DataFrame, Series
import pandas as pd
from pydantic import BaseModel, Field
from .measurements import IntensityForecast
from .mixes import GenerationMixDetails, MixComponent
class Region(BaseModel):
region_id: int = Field(..., alias="regionid")
dno_region: str = Field(..., alias="dnoregion")
short_name: str = Field(..., alias="shortname")
def to_series(self):
record = {
"region_id": self.region_id,
"dno_region": self.dno_region,
"short_name": self.short_name,
}
return | Series(record) | pandas.Series |
"""
Copyright 2019 hiraokusky
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
import re
import json
import sys,os
# pip install git+https://github.com/hiraokusky/snark
from snark import wordnetdb, kanadb
class SynNetDb:
"""
    Temporary in-memory store for processing wordnet easily.
    Also processes the accompanying RDF DB.
    word in synset . synonymy, individuation
    synset isa synset . abstraction, attribute
    synset hasa synset . part/attribute/material/possession (a subject that holds the object)
    synset then synset . state transition (one subject moves to the next); a conditional transition produces a synset carrying the condition
"""
startdict = None
v = False
def __init__(self, opts=''):
self.v = 'v' in opts
def load_file(self, path):
self.startdict = | pd.read_csv(path) | pandas.read_csv |
import pandas as pd
import numpy as np
from src.configs import *
from src.features.transform import categorical_to_ordinal
import collections
class HousePriceData:
'''
Load House Price data for Kaggle competition
'''
def __init__(self, train_path, test_path):
self.trainset = | pd.read_csv(train_path) | pandas.read_csv |
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = Series(dti)
tds = Series(tdi)
res = dts._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1._append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts3 = Series(dti3)
res = dts1._append(dts3)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.Period("2012-02-01", freq="D"),
],
dtype=object,
)
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_mixed_dt_to_object(self):
# GH 13221
# different datetimelike
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
],
dtype=object,
)
res = pi1.append(tdi)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = ps1._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# inverse
exp = Index(
[
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
],
dtype=object,
)
res = tdi.append(pi1)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = tds._append(ps1)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([tds, ps1])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concat_categorical(self):
# GH 13524
# same categories -> category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2], dtype="category")
exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# partially different categories => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1], dtype="category")
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# completely different categories (same dtype) => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series([np.nan, 1, 3, 2], dtype="category")
exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
def test_union_categorical_same_categories_different_order(self):
# https://github.com/pandas-dev/pandas/issues/19096
a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
result = pd.concat([a, b], ignore_index=True)
expected = Series(
Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"])
)
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
import math
import random
import re
import time
import aiohttp
import discord
from discord.ext import commands
import pandas as pd
pd.set_option('display.max_rows', 1000)
import datetime
from fuzzywuzzy import process
class WaitTimes(commands.Cog):
def __init__(self, client):
self._CACHE_TIME = 60 * 3 # minutes
self.client = client
self.parks = ["WaltDisneyWorldMagicKingdom",
"WaltDisneyWorldEpcot",
"WaltDisneyWorldHollywoodStudios",
"WaltDisneyWorldAnimalKingdom",
"UniversalIslandsOfAdventure",
"UniversalStudiosFlorida"]
self.df_parks_waittime = | pd.DataFrame() | pandas.DataFrame |
from pathlib import Path
import re
import pandas as pd
from spectrai.core import get_schmitter_config
import brukeropusreader
DATA_SPECTRA, DATA_SPECTRA_REP, DATA_MEASUREMENTS = get_schmitter_config()
def load_spectra(path=DATA_SPECTRA):
"""Returns DRIFT/MIRs spectra, Petra's data, Vietnam, 2007-2008"""
path = Path(path)
df_list = []
for i, f in enumerate(path.glob('*.*')):
if f.suffix != '.xls':
file = brukeropusreader.read_file(f)
spectrum = pd.Series(file['AB'])
spectrum_name = _clean_column_name(f.name)
if i == 0:
wavelength = pd.Series(file.get_range("AB"))
data = {'wavenumber': wavelength, spectrum_name: spectrum}
else:
data = {spectrum_name: spectrum}
df_list.append(pd.DataFrame(data))
return pd.concat(df_list, axis=1, ignore_index=False, sort=False).set_index('wavenumber')
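# Hedged usage sketch (defined but not called; relies on the configured DATA_SPECTRA path):
# the loader above returns a DataFrame indexed by wavenumber with one column per spectrum.
def _example_load_spectra():
    spectra = load_spectra()
    print(spectra.shape)   # (n_wavenumbers, n_spectra)
    return spectra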
def load_spectra_rep(path=DATA_SPECTRA_REP):
"""Returns DRIFT/MIRs spectra and their replicates, Petra's data, Vietnam, 2007-2008"""
path = Path(path)
df_list = []
_ids = []
for i, f in enumerate(path.glob('*.0')):
_id = int(re.search(r'(.*?)_', f.name).group(1))
if (_id in range(3179, 4922)) and (_id not in _ids):
file = brukeropusreader.read_file(f)
if 'AB' in file:
_ids.append(_id)
spectrum = pd.Series(file['AB'])
spectrum_name = _id
if len(df_list) == 0:
wavelength = pd.Series(file.get_range('AB'))
data = {'wavenumber': wavelength, spectrum_name: spectrum}
else:
data = {spectrum_name: spectrum}
df_list.append( | pd.DataFrame(data) | pandas.DataFrame |
# # Processing several datasets from diverse sources to merge them into a single one
# Target output: a dataset in which each row is a Mexican federal entity and each column an indicator about that entity.
import pandas as pd
import matplotlib.pyplot as plt
# ## Starting with the ones that are already in CSV format and share the same layout.
# Cleaning up the unneeded data
# File names as constants
DS_DIABETES = 'istabla43_2018.csv' # Diabetes detections by delegation, by year through 2018
DS_HIPERTENSION = 'istabla45_2018.csv' # Arterial hypertension detections by delegation, by year through 2018
DS_PADECIMIENTOS = 'istabla39_2018.csv' # Total number of detections by delegation, by year through 2018
db = pd.read_csv(DS_DIABETES, encoding='latin')
# Ignoring the totals row and selecting only the state rows
db = db[1:36]
# Selecting only the columns of interest
db = db[[db.columns[0], '2015', '2018']]
# Renaming the columns to the key defined in the dataset's data dictionary
db = db.rename(columns={
db.columns[0] : 'EDO',
'2015' : 'DET.DIAB.15',
'2018' : 'DET.DIAB.18'
})
# Result of processing DS_DIABETES
print('DS_DIABETES', db)
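# Hedged sketch (defined but not used below): the slice/rename pattern above is repeated
# for each source file, so it could be factored into a helper like this; the short_code
# argument is an assumption introduced only for this example.
def _clean_detections(path, short_code):
    frame = pd.read_csv(path, encoding='latin')
    frame = frame[1:36]                                  # drop totals, keep state rows
    frame = frame[[frame.columns[0], '2015', '2018']]
    return frame.rename(columns={
        frame.columns[0]: 'EDO',
        '2015': 'DET.' + short_code + '.15',
        '2018': 'DET.' + short_code + '.18',
    })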
# Repeating the process for DS_HIPERTENSION
hp = pd.read_csv(DS_HIPERTENSION, encoding='latin')
# Ignoring the totals row and selecting only the state rows
hp = hp[1:36]
# Selecting only the columns of interest
hp = hp[[hp.columns[0], '2015', '2018']]
# Renaming the columns to the key defined in the dataset's data dictionary
hp = hp.rename(columns={
hp.columns[0] : 'EDO',
'2015' : 'DET.HIPT.15',
'2018' : 'DET.HIPT.18'
})
# Partial result of DS_HIPERTENSION
print('DS_HIPERTENSION', hp)
# Repeating the process for DS_PADECIMIENTOS
pad = | pd.read_csv(DS_PADECIMIENTOS, encoding='latin') | pandas.read_csv |
# coding: utf-8
from __future__ import division
import numpy as np
import scipy.spatial.distance as sd
from scipy.special import gamma
from scipy.linalg import toeplitz
from scipy.optimize import minimize
from scipy.stats import ttest_1samp as ttest
import hypertools as hyp
import pandas as pd
import warnings
from matplotlib import pyplot as plt
gaussian_params = {'var': 100}
laplace_params = {'scale': 100}
eye_params = {}
t_params = {'df': 100}
mexican_hat_params = {'sigma': 10}
uniform_params = {}
boxcar_params = {'width': 10}
def gaussian_weights(T, params=gaussian_params):
if params is None:
params = gaussian_params
c1 = np.divide(1, np.sqrt(2 * np.math.pi * params['var']))
c2 = np.divide(-1, 2 * params['var'])
sqdiffs = toeplitz(np.arange(T) ** 2)
return c1 * np.exp(c2 * sqdiffs)
def laplace_weights(T, params=laplace_params):
if params is None:
params = laplace_params
absdiffs = toeplitz(np.arange(T))
    return np.multiply(np.divide(1, 2 * params['scale']), np.exp(-np.divide(absdiffs, params['scale']))) # standard Laplace (double-exponential) density in the timepoint difference
def eye_weights(T, params=eye_params):
return np.eye(T)
def uniform_weights(T, params=uniform_params):
return np.ones([T, T])
def t_weights(T, params=t_params):
if params is None:
params = t_params
c1 = np.divide(gamma((params['df'] + 1) / 2), np.sqrt(params['df'] * np.math.pi) * gamma(params['df'] / 2))
c2 = np.divide(-params['df'] + 1, 2)
sqdiffs = toeplitz(np.arange(T) ** 2)
return np.multiply(c1, np.power(1 + np.divide(sqdiffs, params['df']), c2))
def mexican_hat_weights(T, params=mexican_hat_params):
if params is None:
params = mexican_hat_params
absdiffs = toeplitz(np.arange(T))
sqdiffs = toeplitz(np.arange(T) ** 2)
a = np.divide(2, np.sqrt(3 * params['sigma']) * np.power(np.math.pi, 0.25))
b = 1 - np.power(np.divide(absdiffs, params['sigma']), 2)
c = np.exp(-np.divide(sqdiffs, 2 * np.power(params['sigma'], 2)))
return np.multiply(a, np.multiply(b, c))
def boxcar_weights(T, params=boxcar_params):
if params is None:
params = boxcar_params
return np.multiply(toeplitz(np.arange(T)) < params['width']/2., 1.)
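# Hedged illustration (defined but not called): each *_weights function returns a T x T
# matrix whose column t holds the per-timepoint weights centered on timepoint t; the
# parameter values below are made up for the example.
def _example_kernel_weights(T=10):
    g = gaussian_weights(T, params={'var': 4})
    lap = laplace_weights(T, params={'scale': 2})
    box = boxcar_weights(T, params={'width': 4})
    return g, lap, box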
def format_data(data):
def zero_nans(x):
x[np.isnan(x)] = 0
return x
x = hyp.tools.format_data(data, ppca=False, )
return list(map(zero_nans, x))
def _is_empty(dict):
if not bool(dict):
return True
return False
def wcorr(a, b, weights):
'''
Compute moment-by-moment correlations between sets of observations
:param a: a number-of-timepoints by number-of-features observations matrix
:param b: a number-of-timepoints by number-of-features observations matrix
:param weights: a number-of-timepoints by number-of-timepoints weights matrix
specifying the per-timepoint weights to be considered (for each timepoint)
:return: a a.shape[1] by b.shape[1] by weights.shape[0] array of per-timepoint
correlation matrices.
'''
def weighted_var_diffs(x, w):
w[np.isnan(w)] = 0
if np.sum(np.abs(w)) == 0:
weights_tiled = np.ones(x.shape)
else:
weights_tiled = np.tile(w[:, np.newaxis], [1, x.shape[1]])
mx = np.sum(np.multiply(weights_tiled, x), axis=0)[:, np.newaxis].T
diffs = x - np.tile(mx, [x.shape[0], 1])
varx = np.sum(diffs ** 2, axis=0)[:, np.newaxis].T
return varx, diffs
autocorrelation = np.isclose(a, b).all()
corrs = np.zeros([a.shape[1], b.shape[1], weights.shape[1]])
for t in np.arange(weights.shape[1]):
vara, diffs_a = weighted_var_diffs(a, weights[:, t])
if autocorrelation:
varb = vara
diffs_b = diffs_a
else:
varb, diffs_b = weighted_var_diffs(b, weights[:, t])
alpha = np.dot(diffs_a.T, diffs_b)
beta = np.sqrt(np.dot(vara.T, varb))
corrs[:, :, t] = np.divide(alpha, beta)
return corrs
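# Hedged usage sketch (defined but not called; sizes are made up): per the docstring above,
# the result stacks one feature-by-feature correlation matrix per timepoint.
def _example_wcorr_usage():
    a = np.random.randn(20, 3)          # 20 timepoints x 3 features
    b = np.random.randn(20, 3)
    w = laplace_weights(20)             # 20 x 20 per-timepoint weights
    return wcorr(a, b, w)               # shape (3, 3, 20)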
def wisfc(data, timepoint_weights, subject_weights=None):
'''
Compute moment-by-moment correlations between sets of observations
:data: a list of number-of-timepoints by V matrices
:timepoint weights: a number-of-timepoints by number-of-timepoints weights matrix
specifying the per-timepoint weights to be considered (for each timepoint)
:subject weights: number-of-subjects by number-of-subjects weights matrix
:return: a list of number-of-timepoints by (V^2 - V)/2 + V correlation matrices
'''
if type(data) != list:
return wisfc([data], timepoint_weights, subject_weights=subject_weights)[0]
if subject_weights is None:
K = data[0].shape[1]
connectomes = np.zeros([len(data), int((K ** 2 - K) / 2)])
for s in np.arange(len(data)):
connectomes[s, :] = 1 - sd.pdist(data[s].T, metric='correlation')
subject_weights = 1 - sd.squareform(sd.pdist(connectomes, metric='correlation'))
np.fill_diagonal(subject_weights, 0)
elif np.isscalar(subject_weights):
subject_weights = subject_weights * np.ones([len(data), len(data)])
np.fill_diagonal(subject_weights, 0)
corrs = []
for s, a in enumerate(data):
b = weighted_mean(np.stack(data, axis=2), axis=2, weights=subject_weights[s, :])
wc = wcorr(a, b, timepoint_weights)
wc[np.isnan(wc)] = 0
wc[np.isinf(wc)] = 1
try:
corrs.append(mat2vec(wc))
except:
print('mystery!')
return corrs
def isfc(data, timepoint_weights):
if type(data) != list:
return isfc([data], timepoint_weights)[0]
return wisfc(data, timepoint_weights, subject_weights=1 - np.eye(len(data)))
def autofc(data, timepoint_weights):
if type(data) != list:
return autofc([data], timepoint_weights)[0]
return wisfc(data, timepoint_weights, subject_weights=np.eye(len(data)))
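# Hedged usage sketch (defined but not called; sizes made up): isfc correlates each
# subject's data with the average of the remaining subjects at every timepoint, and
# relies on the rest of this module's helpers (e.g. mat2vec).
def _example_isfc_usage():
    T, V = 30, 4
    data = [np.random.randn(T, V) for _ in range(3)]    # 3 subjects
    w = laplace_weights(T)
    return isfc(data, w)    # list of 3 vectorized correlation timeseries, one per subject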
def apply_by_row(corrs, f):
'''
apply the function f to the correlation matrix specified in each row, and return a
matrix of the concatenated results
:param corrs: a matrix of vectorized correlation matrices (output of mat2vec), or a list
of such matrices
:param f: a function to apply to each vectorized correlation matrix
:return: a matrix of function outputs (for each row of the given matrices), or a list of
such matrices
'''
if type(corrs) is list:
return list(map(lambda x: apply_by_row(x, f), corrs))
corrs = vec2mat(corrs)
return np.stack(list(map(lambda x: f(np.squeeze(x)), np.split(corrs, corrs.shape[2], axis=2))), axis=0)
def corrmean_combine(corrs):
'''
Compute the mean element-wise correlation across each matrix in a list.
:param corrs: a matrix of vectorized correlation matrices (output of mat2vec), or a list
of such matrices
:return: a mean vectorized correlation matrix
'''
if not (type(corrs) == list):
return corrs
elif np.shape(corrs)[0] == 1:
return corrs
else:
return z2r(np.mean(r2z(np.stack(corrs, axis=2)), axis=2))
def mean_combine(vals):
'''
Compute the element-wise mean across each matrix in a list.
:param vals: a matrix, or a list of matrices
:return: a mean matrix
'''
if not (type(vals) == list):
return vals
else:
return np.mean(np.stack(vals, axis=2), axis=2)
def tstat_combine(corrs, return_pvals=False):
'''
Compute element-wise t-tests (comparing distribution means to 0) across each
correlation matrix in a list.
:param corrs: a matrix of vectorized correlation matrices (output of mat2vec), or a list
of such matrices
:param return_pvals: Boolean (default: False). If True, return a second matrix (or list)
of the corresponding t-tests' p-values
:return: a matrix of t-statistics of the same shape as a matrix of vectorized correlation
matrices
'''
if not (type(corrs) == list):
ts = corrs
ps = np.nan * np.zeros_like(corrs)
else:
ts, ps = ttest(r2z(np.stack(corrs, axis=2)), popmean=0, axis=2)
if return_pvals:
return ts, ps
else:
return ts
def null_combine(corrs):
'''
Placeholder function that returns the input
:param corrs: a matrix of vectorized correlation matrices (output of mat2vec), or a list
of such matrices
:return: the input
'''
return corrs
def reduce(corrs, rfun=None):
'''
:param corrs: a matrix of vectorized correlation matrices (output of mat2vec), or a list
of such matrices
:param rfun: function to use for dimensionality reduction. All hypertools and
scikit-learn functions are supported: PCA, IncrementalPCA, SparsePCA,
MiniBatchSparsePCA, KernelPCA, FastICA, FactorAnalysis, TruncatedSVD,
DictionaryLearning, MiniBatchDictionaryLearning, TSNE, Isomap,
SpectralEmbedding, LocallyLinearEmbedding, MDS, and UMAP.
Can be passed as a string, but for finer control of the model
parameters, pass as a dictionary, e.g.
reduction={‘model’ : ‘PCA’, ‘params’ : {‘whiten’ : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
Another option is to use graph theoretic measures computed for each node.
The following measures are supported (via the brainconn toolbox):
eigenvector_centrality, pagerank_centrality, and strength. (Each
of these must be specified as a string; dictionaries not supported.)
Default: None (no dimensionality reduction)
:return: dimensionality-reduced (or original) correlation matrices
'''
try:
import brainconn as bc
_has_brainconn = True
graph_measures = {'eigenvector_centrality': bc.centrality.eigenvector_centrality_und,
'pagerank_centrality': lambda x: bc.centrality.pagerank_centrality(x, d=0.85),
'strength': bc.degree.strengths_und}
except ImportError:
_has_brainconn = False
graph_measures = {'eigenvector_centrality': None,
'pagerank_centrality': None,
'strength': None}
if rfun is None:
return corrs
get_V = lambda x: int(np.divide(np.sqrt(8 * x + 1) - 1, 2))
if type(corrs) is list:
V = get_V(corrs[0].shape[1])
else:
V = get_V(corrs.shape[1])
if _has_brainconn and rfun in graph_measures.keys():
return apply_by_row(corrs, graph_measures[rfun])
elif not _has_brainconn and rfun in graph_measures.keys():
raise ImportError('brainconn is not installed. Please install "git+https://github.com/FIU-Neuro/brainconn#egg=brainconn"')
else:
red_corrs = hyp.reduce(corrs, reduce=rfun, ndims=V)
D = np.shape(red_corrs)[-1]
if D < V :
red_corrs = np.hstack((red_corrs, np.zeros((D, V - D))))
return red_corrs
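# Hedged usage sketch (defined but not called): the rfun string is passed through to
# hypertools/scikit-learn as described in the docstring; the toy input stands in for
# mat2vec-style vectorized correlation matrices.
def _example_reduce_usage():
    T, V = 30, 4
    corrs = np.random.rand(T, V * (V + 1) // 2)
    return reduce(corrs, rfun='PCA')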
def smooth(w, windowsize=10, kernel_fun=laplace_weights, kernel_params=laplace_params):
if type(w) is list:
return list(map(lambda x: smooth(x, windowsize=windowsize, kernel_fun=kernel_fun, kernel_params=kernel_params), w))
assert type(windowsize) == int, 'smoothing kernel must have integer width'
k = kernel_fun(windowsize, params=kernel_params)
if iseven(windowsize):
kernel = np.divide(k[int(np.floor(windowsize/2) - 1), :] + k[int(np.ceil(windowsize/2) - 1), :], 2)
else:
kernel = k[int(np.floor(windowsize/2)), :]
kernel /= kernel.sum()
x = np.zeros_like(w)
for i in range(0, w.shape[1]):
x[:, i] = np.convolve(kernel, w[:, i], mode='same')
return x
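# Hedged usage sketch (defined but not called; sizes made up): smooth() convolves each
# column of a timepoints-by-features matrix with the normalized center row of the chosen
# kernel, and assumes the module's remaining helpers (e.g. iseven) are available.
def _example_smooth_usage():
    w = np.random.randn(100, 6)
    return smooth(w, windowsize=15, kernel_fun=gaussian_weights, kernel_params={'var': 25})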
def timepoint_decoder(data, mu=None, nfolds=2, level=0, cfun=isfc, weights_fun=laplace_weights, weights_params=laplace_params,
combine=mean_combine, rfun=None):
"""
:param data: a list of number-of-observations by number-of-features matrices
:param mu: list of floats sum to one for mixing proportions vector
:param nfolds: number of cross-validation folds (train using out-of-fold data;
test using in-fold data)
:param level: integer or list of integers for levels to be evaluated (default:0)
:param cfun: function for transforming the group data (default: isfc)
:param weights_fun: used to compute per-timepoint weights for cfun; default: laplace_weights
:param weights_params: parameters passed to weights_fun; default: laplace_params
:params combine: function for combining data within each group, or a list of such functions (default: mean_combine)
:param rfun: function for reducing output (default: None)
:return: results dictionary with the following keys:
'rank': mean percentile rank (across all timepoints and folds) in the
decoding distribution of the true timepoint
'accuracy': mean percent accuracy (across all timepoints and folds)
'error': mean estimation error (across all timepoints and folds) between
the decoded and actual window numbers, expressed as a percentage
of the total number of windows
"""
assert len(np.unique(
list(map(lambda x: x.shape[0], data)))) == 1, 'all data matrices must have the same number of timepoints'
assert len(np.unique(
list(map(lambda x: x.shape[1], data)))) == 1, 'all data matrices must have the same number of features'
group_assignments = get_xval_assignments(len(data), nfolds)
orig_level = level
orig_level = np.ravel(orig_level)
if type(level) is int:
level = np.arange(level + 1)
level = np.ravel(level)
    assert type(level) is np.ndarray, 'level needs to be an integer, list, or np.ndarray'
assert not np.any(level < 0), 'level cannot contain negative numbers'
if mu:
orig_level = level.max()
orig_level = np.ravel(orig_level)
assert np.sum(mu)==1, 'weights must sum to one'
assert np.shape(mu)[0]== level.max()+1, 'weights lengths need to be the same as number of levels'
if not np.all(np.arange(level.max()+1)==level):
level = np.arange(level.max()+1)
if callable(combine):
combine = [combine] * np.shape(level)[0]
combine = np.ravel(combine)
assert type(combine) is np.ndarray and type(combine[0]) is not np.str_, 'combine needs to be a function, list of functions, or np.ndarray of functions'
assert len(level)==len(combine), 'combine length need to be the same as level if input is type np.ndarray or list'
if callable(cfun):
cfun = [cfun] * np.shape(level)[0]
cfun = np.ravel(cfun)
    assert type(cfun) is np.ndarray and type(cfun[0]) is not np.str_, 'cfun needs to be a function, list of functions, or np.ndarray of functions'
assert len(level)==len(cfun), 'cfun length need to be the same as level if input is type np.ndarray or list'
if type(rfun) not in [list, np.ndarray]:
rfun = [rfun] * np.shape(level)[0]
p_rfun = [None] * np.shape(level)[0]
assert len(level)==len(rfun), 'parameter lengths need to be the same as level if input is ' \
'type np.ndarray or list'
results_pd = pd.DataFrame()
corrs = 0
for i in range(0, nfolds):
in_raw = []
out_raw = []
for v in level:
if v==0:
in_data = [x for x in data[group_assignments == i]]
out_data = [x for x in data[group_assignments != i]]
in_smooth, out_smooth, in_raw, out_raw = reduce_wrapper(folding_levels(in_data, out_data, level=v, cfun=None,rfun=p_rfun,
combine=combine, weights_fun=weights_fun,
weights_params=weights_params), level=v, rfun=rfun)
else:
in_smooth, out_smooth, in_raw, out_raw = reduce_wrapper(folding_levels(in_raw, out_raw, level=v, cfun=cfun,
rfun=p_rfun, combine=combine,
weights_fun=weights_fun,
weights_params=weights_params), level=v, rfun=rfun)
if mu:
next_corrs = (1 - sd.cdist(in_smooth, out_smooth, 'correlation'))
corrs += mu[v] * z2r(next_corrs)
else:
corrs = (1 - sd.cdist(in_smooth, out_smooth, 'correlation'))
if v in orig_level:
if mu:
corrs = r2z(corrs)
next_results_pd = decoder(corrs)
next_results_pd['level'] = v
next_results_pd['folds'] = i
results_pd = pd.concat([results_pd, next_results_pd])
return results_pd
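# Hedged usage sketch (defined but not called): a minimal synthetic call pattern, assuming
# the companion timecorr/decoder machinery from this package; all sizes are made up.
def _example_timepoint_decoder():
    data = np.array([np.random.randn(60, 5) for _ in range(4)])   # 4 subjects x 60 timepoints x 5 features
    return timepoint_decoder(data, nfolds=2, level=1)             # per-fold decoding accuracy table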
def weighted_timepoint_decoder(data, nfolds=2, level=0, optimize_levels=None, cfun=isfc, weights_fun=laplace_weights,
weights_params=laplace_params, combine=mean_combine, rfun=None, opt_init=None):
"""
:param data: a list of number-of-observations by number-of-features matrices
:param nfolds: number of cross-validation folds (train using out-of-fold data;
test using in-fold data)
:param level: integer or list of integers for levels to be evaluated (default:0)
:param cfun: function for transforming the group data (default: isfc)
:param weights_fun: used to compute per-timepoint weights for cfun; default: laplace_weights
:param weights_params: parameters passed to weights_fun; default: laplace_params
:params combine: function for combining data within each group, or a list of such functions (default: mean_combine)
:param rfun: function for reducing output (default: None)
:return: results dictionary with the following keys:
'rank': mean percentile rank (across all timepoints and folds) in the
decoding distribution of the true timepoint
'accuracy': mean percent accuracy (across all timepoints and folds)
'error': mean estimation error (across all timepoints and folds) between
the decoded and actual window numbers, expressed as a percentage
of the total number of windows
"""
assert len(np.unique(
list(map(lambda x: x.shape[0], data)))) == 1, 'all data matrices must have the same number of timepoints'
assert len(np.unique(
list(map(lambda x: x.shape[1], data)))) == 1, 'all data matrices must have the same number of features'
if nfolds == 1:
sub_nfolds = 1
nfolds = 2
warnings.warn('When nfolds is set to one, the analysis will be circular.')
else:
sub_nfolds = 1
group_assignments = get_xval_assignments(len(data), nfolds)
orig_level = level
orig_level = np.ravel(orig_level)
if type(level) is int:
level = np.arange(level + 1)
level = np.ravel(level)
    assert type(level) is np.ndarray, 'level needs to be an integer, list, or np.ndarray'
assert not np.any(level < 0), 'level cannot contain negative numbers'
if not np.all(np.arange(level.max()+1)==level):
level = np.arange(level.max()+1)
if callable(combine):
combine = [combine] * np.shape(level)[0]
combine = np.ravel(combine)
assert type(combine) is np.ndarray and type(combine[0]) is not np.str_, 'combine needs to be a function, list of ' \
'functions, or np.ndarray of functions'
assert len(level)==len(combine), 'combine length need to be the same as level if input is type np.ndarray or list'
if callable(cfun):
cfun = [cfun] * np.shape(level)[0]
cfun = np.ravel(cfun)
    assert type(cfun) is np.ndarray and type(cfun[0]) is not np.str_, 'cfun needs to be a function, list of functions, ' \
'or np.ndarray of functions'
assert len(level)==len(cfun), 'cfun length need to be the same as level if input is type np.ndarray or list'
if type(rfun) not in [list, np.ndarray]:
rfun = [rfun] * np.shape(level)[0]
p_rfun = [None] * np.shape(level)[0]
assert len(level)==len(rfun), 'parameter lengths need to be the same as level if input is ' \
'type np.ndarray or list'
results_pd = pd.DataFrame()
for i in range(0, nfolds):
in_raw = []
out_raw = []
sub_in_raw = []
sub_out_raw = []
sub_corrs = []
corrs = []
subgroup_assignments = get_xval_assignments(len(data[group_assignments == i]), nfolds)
in_data = [x for x in data[group_assignments == i]]
out_data = [x for x in data[group_assignments != i]]
for v in level:
if v==0:
in_smooth, out_smooth, in_raw, out_raw = folding_levels(in_data, out_data, level=v, cfun=None, rfun=p_rfun,
combine=combine, weights_fun=weights_fun,
weights_params=weights_params)
next_corrs = (1 - sd.cdist(mean_combine([x for x in in_raw]), mean_combine([x for x in out_raw]),
'correlation'))
# next_corrs = (1 - sd.cdist(mean_combine(in_smooth), mean_combine(out_smooth),
# 'correlation'))
corrs.append(next_corrs)
for s in range(0, 1):
sub_in_data = [x for x in data[group_assignments == i][subgroup_assignments==s]]
sub_out_data = [x for x in data[group_assignments == i][subgroup_assignments!=s]]
sub_in_smooth, sub_out_smooth, sub_in_raw, sub_out_raw = folding_levels(sub_in_data, sub_out_data,
level=v, cfun=None, rfun=p_rfun,
combine=combine, weights_fun=weights_fun,
weights_params=weights_params)
next_subcorrs = (1 - sd.cdist(mean_combine([x for x in sub_in_raw]),
mean_combine([x for x in sub_out_raw]), 'correlation'))
# next_subcorrs = (1 - sd.cdist(mean_combine(sub_in_smooth),
# mean_combine(sub_out_smooth), 'correlation'))
sub_corrs.append(next_subcorrs)
else:
in_smooth, out_smooth, in_raw, out_raw = folding_levels(in_raw, out_raw, level=v, cfun=cfun,
rfun=rfun, combine=combine,
weights_fun=weights_fun,
weights_params=weights_params)
next_corrs = (1 - sd.cdist(in_smooth, out_smooth, 'correlation'))
corrs.append(next_corrs)
print('corrs ' + str(v))
for s in range(0, 1):
sub_in_smooth, sub_out_smooth, sub_in_raw, sub_out_raw = folding_levels(sub_in_raw,
sub_out_raw,
level=v,
cfun=cfun,
rfun=rfun,
combine=combine,
weights_fun=weights_fun,
weights_params=weights_params)
print('sub corrs ' + str(v) + str(s))
next_subcorrs = (1 - sd.cdist(sub_in_smooth, sub_out_smooth, 'correlation'))
sub_corrs.append(next_subcorrs)
sub_corrs = np.array(sub_corrs)
corrs = np.array(corrs)
if sub_nfolds == 1:
sub_corrs = corrs
if not optimize_levels:
optimize_levels = range(v+1)
opt_over = []
for lev in optimize_levels:
opt_over.append(lev)
sub_out_corrs = sub_corrs[opt_over,:,:]
out_corrs = corrs[opt_over, :, :]
mu = optimize_weights(sub_out_corrs, opt_init)
w_corrs = weight_corrs(out_corrs, mu)
next_results_pd = decoder(w_corrs)
print(next_results_pd)
next_results_pd['level'] = lev
next_results_pd['folds'] = i
mu_pd = pd.DataFrame()
for c in opt_over:
mu_pd['level_' + str(c)] = [0]
mu_pd += mu
next_results_pd = pd.concat([next_results_pd, mu_pd], axis=1, join_axes=[next_results_pd.index])
results_pd = pd.concat([results_pd, next_results_pd])
return results_pd
def folding_levels(infold_data, outfold_data, level=0, cfun=None, weights_fun=None, weights_params=None, combine=None,
rfun=None):
from .timecorr import timecorr
if rfun is None:
rfun = [None] * np.shape(level)[0]
p_cfun = eval('autofc')
if level == 0:
in_fold_smooth = np.asarray(timecorr([x for x in infold_data], cfun=None,
rfun=rfun[level], combine=combine[level], weights_function=weights_fun,
weights_params=weights_params))
out_fold_smooth = np.asarray(timecorr([x for x in outfold_data], cfun=None,
rfun=rfun[level], combine=combine[level], weights_function=weights_fun,
weights_params=weights_params))
in_fold_raw = infold_data
out_fold_raw = outfold_data
else:
in_fold_smooth = np.asarray(timecorr(list(infold_data), cfun=cfun[level], rfun=rfun[level], combine=combine[level],
weights_function=weights_fun, weights_params=weights_params))
out_fold_smooth = np.asarray(timecorr(list(outfold_data), cfun=cfun[level], rfun=rfun[level], combine=combine[level],
weights_function=weights_fun, weights_params=weights_params))
in_fold_raw = np.asarray(timecorr(list(infold_data), cfun=p_cfun, rfun=rfun[level], combine=null_combine,
weights_function=eye_weights, weights_params=eye_params))
out_fold_raw = np.asarray(timecorr(list(outfold_data), cfun=p_cfun, rfun=rfun[level], combine=null_combine,
weights_function=eye_weights, weights_params=eye_params))
return in_fold_smooth, out_fold_smooth, in_fold_raw, out_fold_raw
def weighted_timepoint_decoder_ec(data, nfolds=2, level=0, optimize_levels=None, cfun=isfc, weights_fun=laplace_weights,
weights_params=laplace_params, combine=mean_combine, rfun=None, opt_init=None):
"""
:param data: a list of number-of-observations by number-of-features matrices
:param nfolds: number of cross-validation folds (train using out-of-fold data;
test using in-fold data)
:param level: integer or list of integers for levels to be evaluated (default:0)
:param cfun: function for transforming the group data (default: isfc)
:param weights_fun: used to compute per-timepoint weights for cfun; default: laplace_weights
:param weights_params: parameters passed to weights_fun; default: laplace_params
:params combine: function for combining data within each group, or a list of such functions (default: mean_combine)
:param rfun: function for reducing output (default: None)
:return: results dictionary with the following keys:
'rank': mean percentile rank (across all timepoints and folds) in the
decoding distribution of the true timepoint
'accuracy': mean percent accuracy (across all timepoints and folds)
'error': mean estimation error (across all timepoints and folds) between
the decoded and actual window numbers, expressed as a percentage
of the total number of windows
"""
if nfolds == 1:
sub_nfolds = 1
nfolds = 2
warnings.warn('When nfolds is set to one, the analysis will be circular.')
else:
sub_nfolds = 1
group_assignments = get_xval_assignments(data.shape[1], nfolds)
orig_level = level
orig_level = np.ravel(orig_level)
if type(level) is int:
level = np.arange(level + 1)
level = np.ravel(level)
    assert type(level) is np.ndarray, 'level needs to be an integer, list, or np.ndarray'
assert not np.any(level < 0), 'level cannot contain negative numbers'
if not np.all(np.arange(level.max()+1)==level):
level = np.arange(level.max()+1)
if callable(combine):
combine = [combine] * np.shape(level)[0]
combine = np.ravel(combine)
assert type(combine) is np.ndarray and type(combine[0]) is not np.str_, 'combine needs to be a function, list of ' \
'functions, or np.ndarray of functions'
assert len(level)==len(combine), 'combine length need to be the same as level if input is type np.ndarray or list'
if callable(cfun):
cfun = [cfun] * np.shape(level)[0]
cfun = np.ravel(cfun)
    assert type(cfun) is np.ndarray and type(cfun[0]) is not np.str_, 'cfun needs to be a function, list of functions, ' \
'or np.ndarray of functions'
assert len(level)==len(cfun), 'cfun length need to be the same as level if input is type np.ndarray or list'
if type(rfun) not in [list, np.ndarray]:
rfun = [rfun] * np.shape(level)[0]
p_rfun = [None] * np.shape(level)[0]
assert len(level)==len(rfun), 'parameter lengths need to be the same as level if input is ' \
'type np.ndarray or list'
results_pd = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
import jellyfish
def find_closest_string(list_string, list_candidate, no_perfect_match=True):
list_df = []
for k in range(len(list_string)):
for c in range(len(list_candidate)):
match_name = list_string[k]
candidate = list_candidate[c]
if not pd.isna(match_name):
if not | pd.isna(candidate) | pandas.isna |
"""
.. module:: repeats
:synopsis: Repeats (transposon) related stuffs
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import csv
import subprocess
import os
import gzip
import glob
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
import uuid
import pandas as PD
import numpy as N
import matplotlib.pylab as P
from jgem import utils as UT
from jgem import fasta as FA
from jgem import filenames as FN
from jgem import bedtools as BT
from jgem import gtfgffbed as GGB
from jgem import assembler2 as A2
from jgem import assembler3 as A3
RMSKPARAMS = dict(
np = 4,
th_uexon=4,
th_bp_ovl=50,
th_ex_ovl=50,
datacode='',
gname='gname',
)
def filter_paths(mdstpre, rdstpre):
ex = UT.read_pandas(rdstpre+'.ex.txt.gz')
def select_chromwise(paths, ex):
npchrs = []
for chrom in paths['chr'].unique():
pchr = paths[paths['chr']==chrom]
echr = ex[ex['chr']==chrom]
exnames = set(echr['name'].values)
#e2gname = UT.df2dict(echr,'name','gname')
idx = [all([x in exnames for x in y.split('|')]) for y in pchr['name']]
npchrs.append(pchr[idx])
return PD.concat(npchrs, ignore_index=True)
paths = GGB.read_bed(mdstpre+'.paths.withse.bed.gz')
npaths = select_chromwise(paths, ex)
GGB.write_bed(npaths, rdstpre+'.paths.withse.bed.gz', ncols=12)
paths = GGB.read_bed(mdstpre+'.paths.txt.gz')
npaths = select_chromwise(paths, ex)
GGB.write_bed(npaths, rdstpre+'.paths.txt.gz', ncols=12)
def filter_sjexdf(mdstpre, rdstpre):
exdf = UT.read_pandas(mdstpre+'.exdf.txt.gz', names=A3.EXDFCOLS)
sedf = UT.read_pandas(mdstpre+'.sedf.txt.gz', names=A3.EXDFCOLS)
exdf = PD.concat([exdf, sedf], ignore_index=True)
sjdf = UT.read_pandas(mdstpre+'.sjdf.txt.gz', names=A3.SJDFCOLS)
ex = UT.read_pandas(rdstpre+'.ex.txt.gz')
sj = UT.read_pandas(rdstpre+'.sj.txt.gz')
def select_chromwise_df(exdf, ex):
npchrs = []
for chrom in exdf['chr'].unique():
pchr = exdf[exdf['chr']==chrom]
echr = ex[ex['chr']==chrom]
exnames = set(echr['name'].values)
idx = [x in exnames for x in pchr['name']]
npchrs.append(pchr[idx])
return PD.concat(npchrs, ignore_index=True)
nexdf = select_chromwise_df(exdf, ex)
nsjdf = select_chromwise_df(sjdf, sj)
UT.write_pandas(nexdf, rdstpre+'.exdf.txt.gz', '')
UT.write_pandas(nsjdf, rdstpre+'.sjdf.txt.gz', '')
class RmskFilter(object):
"""Filter genes with by overlap to repeat masker.
Args:
sjexpre: path prefix to assembled ex.txt.gz, sj.txt.gz files (optionally unionex.txt.gz )
code: identifier
        chromdir: directory which contains chromosome sequences in FASTA format
rmskviz: RepeatMasker viz track (UCSC) converted in BED7 (using jgem.repeats.rmskviz2bed7)
outdir: output directory
"""
def __init__(self, sjexpre, code, chromdir, rmskviz, outdir, **kw):
self.sjexpre = sjexpre
self.prefix = prefix = os.path.join(outdir, code)
self.fnobj = FN.FileNamesBase(prefix)
self.chromdir = chromdir
self.rmskviz = rmskviz
self.gfc = FA.GenomeFASTAChroms(chromdir)
self.params = RMSKPARAMS.copy()
self.params.update(kw)
self.ex = UT.read_pandas(sjexpre+'.ex.txt.gz')
self.sj = UT.read_pandas(sjexpre+'.sj.txt.gz')
if 'glen' not in self.ex or 'tlen' not in self.ex:
if not os.path.exists(sjexpre+'.ci.txt.gz'):
                ci = UT.chopintervals(self.ex, sjexpre+'.ci.txt.gz')
else:
ci = UT.read_ci(sjexpre+'.ci.txt.gz')
UT.set_glen_tlen(self.ex,ci,gidx='_gidx')
UT.write_pandas(self.ex, sjexpre+'.ex.txt.gz', 'h')
uexpath = sjexpre+'.unionex.txt.gz'
if os.path.exists(uexpath):
self.uex = UT.read_pandas(uexpath)
else:
LOG.info('making union exons...saving to {0}'.format(uexpath))
self.uex = UT.make_unionex(self.ex, '_gidx')
UT.write_pandas(self.uex, uexpath, 'h')
def calculate(self):
""" Calculate base pair overlap to repeat using UCSC genome mask of repeats to lower case,
and exon level overlap to repeat using UCSC RepeatMaskerViz track.
ALso make a dataframe containing summary.
"""
pr = self.params
fn = self.fnobj
uex = count_repeats_mp(self.uex, self.gfc, np=pr['np'], col='#repbp')
uex = count_repeats_viz_mp(uex, self.rmskviz, np=pr['np'], idcol='_id', expand=0, col='repnames')
self.ugb = ugb = self._make_gbed(self.ex, self.sj, uex, datacode=pr['datacode'], gname=pr['gname'])
UT.write_pandas(ugb, fn.txtname('all.genes.stats', category='output'), 'h')
def _make_gbed(self, ex, sj, ugb, datacode='', gname='gname'):
# rep%
gr = ugb.groupby('_gidx')
gb2 = gr[['chr',gname,'tlen','glen']].first()
gb2['#repbp'] = gr['#repbp'].sum()
gb2['rep%'] = 100.*gb2['#repbp']/gb2['tlen']
# rmskviz, exon%
gb2['#uexons'] = gr.size()
        gbsub = ugb[ugb['repnames']!='.(-1)'] # '.(-1)' indicates no rmskviz overlap
gb2['#uexons_rmsk'] = gbsub.groupby('_gidx').size() # num exons overlapping rmskviz
gb2.loc[gb2['#uexons_rmsk'].isnull(),'#uexons_rmsk'] = 0
gb2['rviz%'] = 100.*gb2['#uexons_rmsk']/gb2['#uexons']
gb2['repnames'] = gbsub.groupby('_gidx')['repnames'].apply(lambda x: ';'.join(list(x)))
# locus
gb2['st'] = gr['st'].min()
gb2['ed'] = gr['ed'].max()
gb2['glocus'] = UT.calc_locus(gb2,'chr','st','ed')
# rmskviz, class=[Simple_repeats, LINE, SINE, LTR, DNA]
rcols = ['Simple_repeat','LINE','SINE','LTR','DNA']
for k in rcols:
gb2[k] = gb2['repnames'].str.contains('#'+k)
dc = '_'+datacode if datacode else ''
egr = ex.groupby('_gidx')
gb2['#exons'] = egr.size()
gb2['avgecov'] = egr['ecov'+dc].mean()
if 'gcov' in egr:
gb2['gcov'] = egr['gcov'+dc].first()
sgr = sj.groupby('_gidx')
if 'ucnt' in sgr and 'mcnt' in sgr:
gb2['ucnt'] = sgr['ucnt'+dc].sum()
gb2['mcnt'] = sgr['mcnt'+dc].sum()
gb2['minjcnt'] = sgr['mcnt'+dc].min()
elif 'tcnt' in sgr:
gb2['tcnt'] = sgr['tcnt'+dc].sum()
gb2['#junc'] = sgr.size()
# gb2['lscore'] = N.log10(gb2['tlen']) - N.log10(gb2['glen']) + 2
# gb2['jscore'] = N.log10(gb2['ucnt']) - N.log10(gb2['mcnt']) - 1.5
return gb2
def filter(self, **kw):
""" Filter genes.
base pair repeat overlap % >= th_bp_ovl (default 50)
exon_repeat_overlap % >= th_ex_ovl (default 50)
#union exon < th_uexon (default 4)
        That is, by default, it filters out 2-3 exon genes whose base-pair and exon-level
        overlaps to repeats are both greater than or equal to 50%. Does not apply to single exons.
"""
d = self.ugb
pr = self.params
fn = self.fnobj
pr.update(kw)
idx1 = (d['rep%']>=pr['th_bp_ovl'])&(d['rviz%']>pr['th_ex_ovl'])
idx2 = (d['#junc'].notnull())&(d['#uexons']<pr['th_uexon'])
idx = ~(idx1&idx2)
self.ugb2 = ugb2 = d[idx] # filtered
self.ugb3 = ugb3 = d[~idx]
gids = ugb2.index.values
ex0 = self.ex
sj0 = self.sj
uex = self.uex
# filter ex,sj,uex
self.ex2 = ex2 = ex0[ex0['_gidx'].isin(gids)].sort_values(['chr','st','ed'])
self.sj2 = sj2 = sj0[sj0['_gidx'].isin(gids)].sort_values(['chr','st','ed'])
self.uex2 = uex2 = uex[uex['_gidx'].isin(gids)].sort_values(['chr','st','ed'])
gcovfld = 'gcov_'+pr['datacode'] if pr['datacode'] else 'gcov'
self.gbed2 = gbed2 = GGB.unionex2bed12(uex2,name=pr['gname'],sc2=gcovfld,sc1='tlen')
gbed2['sc2'] = gbed2['sc2'].astype(int)
# write out filtered ex,sj,ci,unionex,gbed
UT.write_pandas(ex2, fn.txtname('ex', category='output'), 'h')
UT.write_pandas(sj2, fn.txtname('sj', category='output'), 'h')
UT.chopintervals(ex2, fn.txtname('ci', category='output'))
GGB.write_bed(ex2, fn.bedname('ex', category='output'))
GGB.write_bed(sj2, fn.bedname('sj', category='output'))
UT.write_pandas(uex2, fn.txtname('unionex', category='output'), 'h')
UT.write_pandas(ugb2, fn.txtname('genes.stats', category='output'), 'h')
UT.write_pandas(gbed2, fn.bedname('genes', category='output'), '') # BED12
# also write filtered out genes
self.ex3 = ex3 = ex0[~ex0['_gidx'].isin(gids)].sort_values(['chr','st','ed'])
self.sj3 = sj3 = sj0[~sj0['_gidx'].isin(gids)].sort_values(['chr','st','ed'])
self.uex3 = uex3 = uex[~uex['_gidx'].isin(gids)].sort_values(['chr','st','ed'])
gcovfld = 'gcov_'+pr['datacode'] if pr['datacode'] else 'gcov'
self.gbed3 = gbed3 = GGB.unionex2bed12(uex3,name=pr['gname'],sc2=gcovfld,sc1='tlen')
gbed3['sc2'] = gbed3['sc2'].astype(int)
# write out filtered ex,sj,ci,unionex,gbed
UT.write_pandas(ex3, fn.txtname('removed.ex', category='output'), 'h')
UT.write_pandas(sj3, fn.txtname('removed.sj', category='output'), 'h')
UT.chopintervals(ex3, fn.txtname('removed.ci', category='output'))
UT.write_pandas(uex3, fn.txtname('removed.unionex', category='output'), 'h')
UT.write_pandas(ugb3, fn.txtname('removed.genes.stats', category='output'), 'h')
UT.write_pandas(gbed3, fn.bedname('removed.genes', category='output'), '') # BED12
def save_params(self):
UT.save_json(self.params, self.fnobj.fname('params.json', category='output'))
def __call__(self):
self.calculate()
self.filter()
self.save_params()
filter_paths(self.sjexpre, self.prefix)
filter_sjexdf(self.sjexpre, self.prefix)
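# Illustrative usage sketch of RmskFilter (all paths below are hypothetical):
#   rf = RmskFilter('assemblies/sample', code='sample.rmsk',
#                   chromdir='genome/chroms', rmskviz='genome/rmskviz.bed.gz',
#                   outdir='rmsk_filtered', np=2)
#   rf()  # runs calculate(), filter(), save_params(), then filters paths/sjdf/exdf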
def plot_tlen_vs_glen_panels(gbed, fld='rep%', alpha=0.1, ms=0.8):
fig,axr = P.subplots(2,5,figsize=(15,6), sharex=True, sharey=True)
for i,t in enumerate(range(0,100,10)):
ax = axr[int(i/5)][i % 5]
_tvl(gbed, t, t+10, ax=ax, fld=fld, alpha=alpha, ms=ms)
def plot_tlen_vs_glen(gbed, title='', ax=None, mk='b.', ms=0.5, alpha=0.1):
x = N.log10(gbed['glen'])
y = N.log10(gbed['tlen'])
if ax is None:
fig, ax = P.subplots(1,1,figsize=(3,3))
ax.plot(x.values, y.values, mk, ms=ms, alpha=alpha)
ax.set_title('{0} (#{1})'.format(title,len(gbed)))
ax.set_xlabel('log10(glen)')
ax.set_ylabel('log10(tlen)')
ax.set_xlim(1,7)
ax.set_ylim(1.5,5.5)
def _tvl(gbed, t0, t1, alpha=0.1, ax=None, fld='rep%',ms=0.1):
idx10 = (gbed[fld]>=t0)&(gbed[fld]<=t1)
x = N.log10(gbed['glen'])
y = N.log10(gbed['tlen'])
if ax is None:
fig, ax = P.subplots(1,1,figsize=(3,3))
ax.plot(x[idx10].values, y[idx10].values, 'b.', ms=ms, alpha=alpha)
ax.set_title('{0}<={3}<={1} ({2})'.format(t0,t1,N.sum(idx10),fld))
#ax.axhline(3.2)
ax.set_xlabel('log10(glen)')
ax.set_ylabel('log10(tlen)')
ax.set_xlim(1,7)
ax.set_ylim(1.5,5.5)
def count_repeats(beddf, genomefastaobj, col='#repbp', returnseq=False, seqcol='seq'):
"""Looks up genome sequence and counts the number of lower characters.
(RepeatMaker masked sequence are set to lower characters in UCSC genome)
Args:
beddf: Pandas DataFrame with chr,st,ed columns, when calculating repeats bp
for genes, unioned bed should be used (use utils.make_unionex)
genomefastaobj: an object with get(chr,st,ed) method that returns sequence
(use fasta.GenomeFASTAChroms).
        col: column name where counts will be put
returnseq (bool): whether to return sequence or not (default False)
seqcol: column where sequences are put in (default seq)
Outputs:
are put into beddf columns with colname col(default #repbp)
"""
def _cnt(chrom,st,ed):
seq = genomefastaobj.get(chrom,st,ed)
return N.sum([x.islower() for x in seq])
if returnseq:
beddf[seqcol] = [genomefastaobj.get(*x) for x in beddf[['chr','st','ed']].values]
beddf[col] = beddf[seqcol].apply(lambda x: N.sum([y.islower() for y in x]))
else:
beddf[col] = [_cnt(*x) for x in beddf[['chr','st','ed']].values]
return beddf
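# Minimal illustration of the lowercase-counting idea used above (sequence is made up):
#   seq = 'ACGTacgtNNgg'                       # repeat-masked bases are lowercase
#   N.sum([x.islower() for x in seq])          # -> 6 repeat base pairs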
def count_repeats_mp(beddf, genomefastaobj, col='#repbp', returnseq=False, seqcol='seq', idfld='_id', np=4):
""" MultiCPU version of counts_repeats """
# only send relevant part i.e. chr,st,ed,id
if not idfld in beddf:
beddf[idfld] = N.arange(len(beddf))
# number per CPU
n = int(N.ceil(len(beddf)/float(np))) # per CPU
args = [(beddf.iloc[i*n:(i+1)*n],genomefastaobj,col,returnseq,seqcol) for i in range(np)]
rslts = UT.process_mp(count_repeats, args, np=np, doreduce=False)
df = PD.concat(rslts, ignore_index=True)
i2c = UT.df2dict(df, idfld, col)
beddf[col] = [i2c[x] for x in beddf[idfld]]
if returnseq:
i2s = UT.df2dict(df, idfld, seqcol)
beddf[seqcol] = [i2s[x] for x in beddf[idfld]]
return beddf
def count_repeats_viz_mp(beddf, rmskvizpath, idcol='_id', np=3, prefix=None, expand=0, col='repnames'):
"""Use rmsk-viz track and check each (unioned) exon overlaps with repeats and report repeat name(s).
Uses Bedtools and calculates chromosome-wise.
Args:
beddf: Pandas DataFrame with chr,st,ed cols, when calculating repeats bp
for genes, unioned bed should be used (use utils.make_unionex)
idcol: colname for unique row id (default _id)
rmskvizpath: path to repeat masker viz BED7 file (created using rmskviz2bed7)
np: number of CPU to use
prefix: path prefix for temp file, if not None temp files are kept. (default None)
expand: how many bases to expand exon region in each side (default 0)
col: column name to put in overlapping repeat names (if multiple comma separated)
Outputs:
are put into beddf columns with colname col(default repnames)
"""
cleanup = False
if prefix is None:
cleanup = True
prefix = os.path.join(os.path.dirname(rmskvizpath), str(uuid.uuid4())+'_')
# chrom-wise
chroms = sorted(beddf['chr'].unique())
# check whether rmskviz is already split
splitrmsk=False
for chrom in chroms:
rpath = rmskvizpath+'.{0}.bed.gz'.format(chrom) # reuse
if not os.path.exists(rpath):
splitrmsk=True
break
if splitrmsk:
rmsk = GGB.read_bed(rmskvizpath)
args = []
bfiles = []
ofiles = []
for chrom in chroms:
bpath = prefix+'tgt.{0}.bed'.format(chrom) # don't compress
rpath = rmskvizpath+'.{0}.bed.gz'.format(chrom) # reuse
if expand>0:
bchr = beddf[beddf['chr']==chrom].copy()
bchr['st'] = bchr['st'] - expand
bchr['ed'] = bchr['ed'] + expand
bchr.loc[bchr['st']<0,'st'] = 0
else:
bchr = beddf[beddf['chr']==chrom]
UT.write_pandas(bchr[['chr','st','ed',idcol]], bpath, '')
bfiles.append(bpath)
if splitrmsk:
rchr = rmsk[rmsk['chr']==chrom]
UT.write_pandas(rchr[['chr','st','ed','name','strand']], rpath, '')
opath = prefix+'out.{0}.bed'.format(chrom)
ofiles.append(opath)
args.append([bpath, rpath, opath])
rslts = UT.process_mp(count_repeats_viz_chr, args, np=np, doreduce=False)
# gather outputs
cols = ['name','repnames']
outs = [UT.read_pandas(f, names=cols) for f in ofiles]
df = PD.concat(outs, ignore_index=True)
df['name'] = df['name'].astype(str)
i2rn = UT.df2dict(df, 'name', 'repnames')
beddf[col] = [i2rn[str(x)] for x in beddf[idcol]]
# cleanup
if cleanup:
for f in bfiles:
os.unlink(f)
for f in ofiles:
os.unlink(f)
return beddf
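# The per-chromosome work dispatched above boils down to calls like (file names hypothetical):
#   count_repeats_viz_chr('pfx_tgt.chr1.bed', 'rmskviz.bed.gz.chr1.bed.gz', 'pfx_out.chr1.bed')
# i.e. an overlap-reporting intersection (wao=True) of target exons against the repeat track.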
def count_repeats_viz_chr(bedpath, rmskpath, outpath):
c = BT.bedtoolintersect(bedpath, rmskpath, outpath, wao=True)
cols = ['chr','st','ed','name','b_chr','b_st','b_ed','b_name','strand','ovl']
df = UT.read_pandas(c, names=cols)
df['rn'] = df['b_name']+'('+df['strand']+')'
# group and concat repname
dg = df.groupby('name')['rn'].apply(lambda x: ','.join(list(x))).reset_index()
UT.write_pandas(dg, outpath, 'h')
def rmskviz2bed7(df):
"""Convert UCSC repeatmasker viz track download to BED7 format"""
cols = ['chrom','chromStart','chromEnd','name','score','strand',
'alignStart','alignEnd','blockSizes','blockRelStarts','id']
# => chr[0],st(*),ed(*),name[3],sc1[4],strand[5],tst(id[-1])
# st,ed: calculate from blocks, only use non -1 starts
# st0[1],ed0[2],bsizes[-3],bstarts[-2]
cols1 = ['chr','st','ed','name','sc1','strand','tst']
def _gen():
for x in UT.izipcols(df, cols):
rec = [x[0],0,0,x[3],x[4],x[5],x[-1]]
bsizes = [int(y) for y in x[-3].split(',')]
bstarts = [int(y) for y in x[-2].split(',')]
for y,z in zip(bstarts,bsizes):
if y>=0:
rec[1] = x[1]+y
rec[2] = x[1]+y+z
yield rec.copy()
rows = [x for x in _gen()]
    df = PD.DataFrame(rows, columns=cols1)
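# The resulting BED7 columns are chr, st, ed, name, sc1, strand, tst, with one row emitted per
# aligned block of the original rmsk-viz record; a hypothetical row looks like
#   ['chr1', 3000235, 3000368, 'L1_Mus3', 1892, '-', 15]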
import pandas as pd
import networkx as nx
import pytest
from kgextension.feature_selection import hill_climbing_filter, hierarchy_based_filter, tree_based_filter
from kgextension.generator import specific_relation_generator, direct_type_generator
class TestHillCLimbingFilter:
def test1_high_beta(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test1_expected.csv")
output_df = hill_climbing_filter(input_df, 'uri_bool_http://class', G= input_DG, beta=0.5, k=2)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test2_generator_data_low_beta(self):
df = pd.DataFrame({
'entities': ['Paris', 'Buenos Aires', 'Mannheim', "München"],
'link': ['http://dbpedia.org/resource/Paris', 'http://dbpedia.org/resource/Buenos_Aires',
'http://dbpedia.org/resource/Mannheim', 'http://dbpedia.org/resource/Munich']
})
input_df = specific_relation_generator(
df, columns=['link'], hierarchy_relation='http://www.w3.org/2004/02/skos/core#broader')
expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test2_expected.csv")
output_df = hill_climbing_filter(input_df, 'link_in_boolean_http://dbpedia.org/resource/Category:Prefectures_in_France', beta=0.05, k=3)
pd.testing.assert_frame_equal(output_df, expected_df, check_like=True)
def test3_nan(self):
input_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_input.csv")
input_DG = nx.DiGraph()
labels = ['http://chancellor', 'http://president', 'http://European_politician',
'http://head_of_state', 'http://politician', 'http://man', 'http://person', 'http://being']
input_DG.add_nodes_from(labels)
input_DG.add_edges_from([('http://chancellor', 'http://politician'), ('http://president', 'http://politician'),
('http://chancellor', 'http://head_of_state'), ('http://president', 'http://head_of_state'), ('http://head_of_state', 'http://person'),
('http://European_politician', 'http://politician'), ('http://politician', 'http://person'),
('http://man', 'http://person'), ('http://person', 'http://being')])
        expected_df = pd.read_csv("test/data/feature_selection/hill_climbing_test3_expected.csv")
from tcrdist.public import _neighbors_sparse_variable_radius, _neighbors_sparse_fixed_radius
import pandas as pd
def join_by_dist(
csrmat,
left_df,
right_df,
how = "inner",
left_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','subject'],
right_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','subject'],
left_suffix = '_x',
right_suffix = '_y',
max_n= 5,
radius = 1,
radius_list = None,
sort_by_dist = True):
"""
Join two sets of TCRs, based on a distance threshold
encoded in a sparse matrix `csrmat`.
The TCRs in the Left-DataFrame, are joined with TCRs in the Right-Dataframe,
for up to `max_n` closest TCRs where the paired distance is less that
that specifed in the `radius` or `radius_list` arguments.
This is analogous to SQL type joins, except instead of matching common keys, the
    rows of the Left and Right DataFrames are merged based on finding similar TCRs within
    a specified radius of sequence divergence. Intuitively, this
permits fuzzy matching between similar TCRs in any two
TCR repertoire data sets.
    Crucially, one must provide a scipy.sparse csr matrix, which can be pre-computed using
    :py:func:`tcrdist.rep_funcs.compute_pws_sparse` or
    :py:func:`tcrdist.repertoire.TCRrep.compute_sparse_rect_distances`.
It is also possible to join using a unique radius for each sequence in Left-DataFrame
using the `radius_list` argument instead of the fixed `radius` argument.
    However, if using a radius list, its length must match the number of rows in the csrmat
    and the number of rows in the Left DataFrame (i.e., len(radius_list) == left_df.shape[0]).
Parameters
----------
csrmat : scipy.sparse.matrix
rows must correspond to index of left_df
columns must correspond to index of right_df
left_df: pandas.DataFrame
Clone DataFrame
right_df: pandas.DataFrame
Clone DataFrame
how : str
Must be one of 'inner','left', or'outer', which determines the type of merge to be performed.
* 'inner' use intersection of top max_n neigbors between Left and Right DataFrames, droping rows where there is no match.
        * 'left' use the top max_n rows from the Right DataFrame that neighbor rows in the Left DataFrame, or produce NA where a TCR in the Left DataFrame has no neighbor in the Right DataFrame.
* 'outer' a FULL OUTER JOIN combines top max_n neighbors of both left and right DataFrame, producing NAs where a TCR in either the Right or Left DataFrame has no neighbor in other DataFrame.
* (hint: right joins are not possible, unless you switch input dataframe order and recompute the spase matrix)
left_cols : list
all columns to include in left_df
right_cols : list
all columns to include in right_df
left_suffix : str
appends to left columns
right_suffix : str
appends to right columns
    max_n : int
        limit on the number of neighbors to join per row.
        For instance, if a TCR has 100 neighbors and max_n is 10, only the first 10 rows in the right df will
        be included (this is to avoid massive blowup in cases where knowing
        about a few neighbors would suffice)
    radius : int
        fixed radius used when radius_list is None (default 1)
Returns
-------
left_right_df : pandas DataFrame
concatenates rows from left and right dataframe for all sequences within a specified distance
"""
assert how in ['inner','left','outer']
if how == "inner":
add_unmatched_left = False
add_unmatched_right= False
elif how == "left":
add_unmatched_left = True
add_unmatched_right= False
elif how == "outer":
add_unmatched_left = True
add_unmatched_right= True
if radius_list is None:
nn = _neighbors_sparse_fixed_radius(csrmat = csrmat, radius = radius)
else:
assert len(radius_list) == left_df.shape[0]
nn = _neighbors_sparse_variable_radius(csrmat = csrmat, radius_list = radius_list)
left_index = list()
right_index = list()
dists = list()
for i,ns in enumerate(nn):
l = len(ns)
if l > 0:
ds = csrmat[i,ns].data
if sort_by_dist:
# Sort n index by dist smallest to largest
# sorted(zip([10,1,0,-1],[100,500,1,10000])) => [(-1, 10000), (0, 1), (1, 500), (10, 100)]
# thus [n for d, n in sorted(zip([10,1,0,-1],[100,500,1,10000]))] => [10000, 1, 500, 100]
ns_ds = [(n,d) for d, n in sorted(zip(ds, ns))]
ns,ds = zip(*ns_ds)
if l > max_n:
l = max_n
left_index.extend([i]*l)
right_index.extend(ns[0:l])
dists.extend(ds[0:l])
left_selection = left_df[left_cols].rename(columns ={k:f"{k}{left_suffix}" for k in left_cols}).iloc[left_index,].reset_index(drop = True)
right_selection = right_df[right_cols].rename(columns ={k:f"{k}{right_suffix}" for k in right_cols}).iloc[right_index,].reset_index(drop = True)
left_right_df = pd.concat([left_selection, right_selection], axis = 1)
left_right_df['dist'] = dists
if add_unmatched_left:
left_index_unmatched = sorted(list(set(left_df.index) - set(left_index)))
left_df_unmatched = left_df[left_cols].rename(columns ={k:f"{k}{left_suffix}" for k in left_cols}).iloc[left_index_unmatched,].reset_index(drop = True)
        left_right_df = pd.concat([left_right_df, left_df_unmatched], axis=0)
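# Illustrative usage (argument values are hypothetical; csrmat rows must index left_df and
# columns right_df, e.g. from TCRrep.compute_sparse_rect_distances as noted in the docstring):
#   merged = join_by_dist(csrmat, left_df=bulk_df, right_df=reference_df,
#                         how='inner', radius=18, max_n=5)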
# -*- coding: utf-8 -*-
"""Core logic for computing subtrees."""
# standard library imports
import contextlib
import os
import sys
from collections import Counter
from collections import OrderedDict
from itertools import chain
from itertools import combinations
from pathlib import Path
# third-party imports
import networkx as nx
import numpy as np
import pandas as pd
from Bio import SeqIO
# first-party imports
import sh
# module imports
from .common import CLUSTER_HIST_FILE
from .common import NAME
from .common import SEARCH_PATHS
from .common import cluster_set_name
from .common import fasta_records
from .common import get_paths_from_file
from .common import homo_degree_dist_filename
from .common import logger
from .common import protein_properties_filename
from .common import write_tsv_or_parquet
from .protein import Sanitizer
# global constants
STATFILE_SUFFIX = f"-{NAME}_stats.tsv"
ANYFILE_SUFFIX = f"-{NAME}_ids-any.tsv"
ALLFILE_SUFFIX = f"-{NAME}_ids-all.tsv"
CLUSTFILE_SUFFIX = f"-{NAME}_clusts.tsv"
SEQ_FILE_TYPE = "fasta"
UNITS = {
"Mb": {"factor": 1, "outunits": "MB"},
"Gb": {"factor": 1024, "outunits": "MB"},
"s": {"factor": 1, "outunits": "s"},
"m": {"factor": 60, "outunits": "s"},
"h": {"factor": 3600, "outunits": "s"},
}
SEQ_IN_LINE = 6
IDENT_STATS_LINE = 7
FIRST_LOG_LINE = 14
LAST_LOG_LINE = 23
STAT_SUFFIXES = ["size", "mem", "time", "memory"]
RENAME_STATS = {
"throughput": "throughput_seq_s",
"time": "CPU_time",
"max_size": "max_cluster_size",
"avg_size": "avg_cluster_size",
"min_size": "min_cluster_size",
"seqs": "unique_seqs",
"singletons": "singleton_clusters",
}
ID_SEPARATOR = "."
IDENT_LOG_MIN = -3
IDENT_LOG_MAX = 0
FASTA_EXT_LIST = [".faa", ".fa", ".fasta"]
FAA_EXT = "faa"
# helper functions
def read_synonyms(filepath):
"""Read a file of synonymous IDs into a dictionary."""
synonym_dict = {}
try:
synonym_frame = pd.read_csv(filepath, sep="\t")
except FileNotFoundError:
logger.error(f'Synonym tsv file "{filepath}" does not exist')
sys.exit(1)
except pd.errors.EmptyDataError:
logger.error(f'Synonym tsv "{filepath}" is empty')
sys.exit(1)
if len(synonym_frame) > 0:
if "#file" in synonym_frame:
synonym_frame.drop("#file", axis=1, inplace=True)
key = list({("Substr", "Dups")}.intersection({synonym_frame.columns}))[
0
]
for group in synonym_frame.groupby("id"):
synonym_dict[group[0]] = group[1][key]
return synonym_dict
def parse_usearch_log(filepath, rundict):
"""Parse the usearch log file into a stats dictionary."""
with filepath.open() as logfile:
for lineno, line in enumerate(logfile):
if lineno < FIRST_LOG_LINE:
if lineno == SEQ_IN_LINE:
split = line.split()
rundict["seqs_in"] = int(split[0])
rundict["singleton_seqs_in"] = int(split[4])
if lineno == IDENT_STATS_LINE:
split = line.split()
rundict["max_identical_seqs"] = int(split[6].rstrip(","))
rundict["avg_identical_seqs"] = float(split[8])
continue
if lineno > LAST_LOG_LINE:
break
split = line.split()
if split:
stat = split[0].lower()
if split[1] in STAT_SUFFIXES:
stat += "_" + split[1]
val = split[2]
else:
val = split[1].rstrip(",")
# rename poorly-named stats
stat = RENAME_STATS.get(stat, stat)
# strip stats with units at the end
conversion_factor = 1
for unit in UNITS:
if val.endswith(unit):
val = val.rstrip(unit)
conversion_factor = UNITS[unit]["factor"]
stat += "_" + UNITS[unit]["outunits"]
break
# convert string values to int or float where possible
try:
val = int(val)
val *= conversion_factor
except ValueError:
try:
val = float(val)
val *= conversion_factor
except ValueError:
pass
rundict[stat] = val
@contextlib.contextmanager
def in_working_directory(path):
"""Change working directory and return to previous wd on exit."""
original_cwd = Path.cwd()
os.chdir(path)
try:
yield
finally:
os.chdir(original_cwd)
def get_fasta_ids(fasta):
"""Get the IDS from a FASTA file."""
idset = set()
with fasta.open() as fasta_fh:
for line in fasta_fh:
if line.startswith(">"):
idset.add(line.split()[0][1:])
return list(idset)
def parse_chromosome(ident):
"""Parse chromosome identifiers."""
# If ident contains an underscore, work on the
# last part only (e.g., MtrunA17_Chr4g0009691)
undersplit = ident.split("_")
if len(undersplit) > 1:
ident = undersplit[-1].upper()
if ident.startswith("CHR"):
ident = ident[3:]
# Chromosome numbers are integers suffixed by 'G'
try:
chromosome = "Chr" + str(int(ident[: ident.index("G")]))
except ValueError:
chromosome = None
return chromosome
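# e.g. parse_chromosome('MtrunA17_Chr4g0009691') returns 'Chr4', while a non-conforming
# identifier such as 'scaffold_123' returns None.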
def parse_subids(ident):
"""Parse the subidentifiers from identifiers."""
subids = ident.split(ID_SEPARATOR)
subids += [
chromosome
for chromosome in [parse_chromosome(ident) for ident in subids]
if chromosome is not None
]
return subids
def parse_clusters(outdir, delete=True, count_clusters=True, synonyms=None):
"""Parse clusters, counting occurrances."""
if synonyms is None:
synonyms = {}
cluster_list = []
id_list = []
degree_list = []
size_list = []
degree_counter = Counter()
any_counter = Counter()
all_counter = Counter()
graph = nx.Graph()
for fasta in outdir.glob("*"):
cluster_id = int(fasta.name)
ids = get_fasta_ids(fasta)
if len(synonyms) > 0:
syn_ids = set(ids).intersection(synonyms.keys())
for i in syn_ids:
ids.extend(synonyms[i])
n_ids = len(ids)
degree_list.append(n_ids)
degree_counter.update({n_ids: 1})
id_list += ids
cluster_list += [cluster_id] * n_ids
size_list += [n_ids] * n_ids
# Do 'any' and 'all' counters
id_counter = Counter()
id_counter.update(
chain.from_iterable(
[parse_subids(cluster_id) for cluster_id in ids]
)
)
if count_clusters:
any_counter.update(id_counter.keys())
all_counter.update(
[
cluster_id
for cluster_id in id_counter.keys()
if id_counter[cluster_id] == n_ids
]
)
elif n_ids > 1:
any_counter.update({s: n_ids for s in id_counter.keys()})
all_counter.update(
{
cluster_id: n_ids
for cluster_id in id_counter.keys()
if id_counter[cluster_id] == n_ids
}
)
# Do graph components
graph.add_nodes_from(ids)
if n_ids > 1:
edges = combinations(ids, 2)
graph.add_edges_from(edges, weight=n_ids)
if delete:
fasta.unlink()
if delete:
outdir.rmdir()
return (
graph,
cluster_list,
id_list,
size_list,
degree_list,
degree_counter,
any_counter,
all_counter,
)
def prettyprint_float(val, digits):
"""Print a floating-point value in a nice way."""
format_string = "%." + f"{digits:d}" + "f"
return (format_string % val).rstrip("0").rstrip(".")
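# e.g. prettyprint_float(98.4000, 2) returns '98.4' and prettyprint_float(100.0, 2) returns '100'.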
def homology_cluster(
seqfile,
identity,
delete=True,
write_ids=False,
do_calc=True,
min_id_freq=0,
substrs=None,
dups=None,
cluster_stats=True,
outname=None,
click_loguru=None,
):
"""Cluster at a global sequence identity threshold."""
try:
usearch = sh.Command("usearch", search_paths=SEARCH_PATHS)
except sh.CommandNotFound:
logger.error("usearch must be installed first.")
sys.exit(1)
try:
inpath, dirpath = get_paths_from_file(seqfile)
except FileNotFoundError:
logger.error(f'Input file "{seqfile}" does not exist!')
sys.exit(1)
stem = inpath.stem
dirpath = inpath.parent
if outname is None:
outname = cluster_set_name(stem, identity)
outdir = f"{outname}/"
logfile = f"{outname}.log"
outfilepath = dirpath / outdir
logfilepath = dirpath / logfile
histfilepath = dirpath / homo_degree_dist_filename(outname)
gmlfilepath = dirpath / f"{outname}.gml"
statfilepath = dirpath / f"{outname}-stats.tsv"
anyfilepath = dirpath / f"{outname}-anyhist.tsv"
allfilepath = dirpath / f"{outname}-allhist.tsv"
idpath = dirpath / f"{outname}-ids.tsv"
if identity == 0.0:
identity_string = "Minimum"
else:
identity_string = f"{prettyprint_float(identity *100, 2)}%"
logger.info(f'{identity_string} sequence identity cluster "{outname}":')
if not delete:
logger.debug(f"Cluster files will be kept in {logfile} and {outdir}")
if cluster_stats and write_ids:
logger.debug(
f"File of cluster ID usage will be written to {anyfilepath} and"
f" {allfilepath}"
)
if not do_calc:
if not logfilepath.exists():
logger.error("Previous results must exist, rerun with --do_calc")
sys.exit(1)
logger.debug("Using previous results for calculation")
if min_id_freq:
logger.debug(
"Minimum number of times ID's must occur to be counted:"
f" {min_id_freq}"
)
synonyms = {}
if substrs is not None:
logger.debug(f"using duplicates in {dirpath / dups}")
synonyms.update(read_synonyms(dirpath / substrs))
if dups is not None:
logger.debug(f"using duplicates in {dirpath/dups}")
synonyms.update(read_synonyms(dirpath / dups))
click_loguru.elapsed_time("Clustering")
if do_calc:
#
# Delete previous results, if any.
#
if outfilepath.exists() and outfilepath.is_file():
outfilepath.unlink()
elif outfilepath.exists() and outfilepath.is_dir():
for file in outfilepath.glob("*"):
file.unlink()
else:
outfilepath.mkdir()
#
# Do the calculation.
#
with in_working_directory(dirpath):
output = usearch(
[
"-cluster_fast",
seqfile,
"-id",
identity,
"-clusters",
outdir,
"-log",
logfile,
]
)
logger.debug(output)
run_stat_dict = OrderedDict([("divergence", 1.0 - identity)])
parse_usearch_log(logfilepath, run_stat_dict)
run_stats = pd.DataFrame(
list(run_stat_dict.items()), columns=["stat", "val"]
)
run_stats.set_index("stat", inplace=True)
write_tsv_or_parquet(run_stats, statfilepath)
if delete:
logfilepath.unlink()
if not cluster_stats:
file_sizes = []
file_names = []
record_counts = []
logger.debug("Ordering clusters by number of records and size.")
for fasta_path in outfilepath.glob("*"):
records, size = fasta_records(fasta_path)
if records == 1:
fasta_path.unlink()
continue
file_names.append(fasta_path.name)
file_sizes.append(size)
record_counts.append(records)
file_frame = pd.DataFrame(
list(zip(file_names, file_sizes, record_counts)),
columns=["name", "size", "seqs"],
)
file_frame.sort_values(
by=["seqs", "size"], ascending=False, inplace=True
)
file_frame["idx"] = range(len(file_frame))
for unused_id, row in file_frame.iterrows():
(outfilepath / row["name"]).rename(
outfilepath / f'{row["idx"]}.fa'
)
file_frame.drop(["name"], axis=1, inplace=True)
file_frame.set_index("idx", inplace=True)
# write_tsv_or_parquet(file_frame, "clusters.tsv")
# cluster histogram
cluster_hist = pd.DataFrame(file_frame["seqs"].value_counts())
cluster_hist.rename(columns={"seqs": "clusters"}, inplace=True)
cluster_hist.index.name = "n"
cluster_hist.sort_index(inplace=True)
total_seqs = sum(file_frame["seqs"])
n_clusters = len(file_frame)
cluster_hist["pct_clusts"] = (
cluster_hist["clusters"] * 100.0 / n_clusters
)
cluster_hist["pct_seqs"] = (
cluster_hist["clusters"] * cluster_hist.index * 100.0 / total_seqs
)
cluster_hist.to_csv(CLUSTER_HIST_FILE, sep="\t", float_format="%06.3f")
return n_clusters, run_stats, cluster_hist
(
cluster_graph,
clusters,
ids,
sizes,
unused_degrees,
degree_counts,
any_counts,
all_counts,
) = parse_clusters( # pylint: disable=unused-variable
outfilepath, delete=delete, synonyms=synonyms
)
#
# Write out list of clusters and ids.
#
id_frame = pd.DataFrame.from_dict(
{
"id": ids,
"hom.cluster": pd.array(clusters, dtype=pd.UInt32Dtype()),
"siz": sizes,
}
)
id_frame.sort_values("siz", ascending=False, inplace=True)
id_frame = id_frame.reindex(
["hom.cluster", "siz", "id"],
axis=1,
)
id_frame.reset_index(inplace=True)
id_frame.drop(["index"], axis=1, inplace=True)
id_frame.to_csv(idpath, sep="\t")
del ids, clusters, sizes, id_frame
click_loguru.elapsed_time("graph")
#
# Write out degree distribution.
#
cluster_hist = pd.DataFrame(
list(degree_counts.items()), columns=["degree", "clusters"]
)
cluster_hist.sort_values(["degree"], inplace=True)
cluster_hist.set_index("degree", inplace=True)
total_clusters = cluster_hist["clusters"].sum()
cluster_hist["pct_total"] = (
cluster_hist["clusters"] * 100.0 / total_clusters
)
cluster_hist.to_csv(histfilepath, sep="\t", float_format="%06.3f")
del degree_counts
#
# Do histograms of "any" and "all" id usage in cluster
#
hist_value = f"{identity:f}"
any_hist = pd.DataFrame(
list(any_counts.items()), columns=["id", hist_value]
)
any_hist.set_index("id", inplace=True)
any_hist.sort_values(hist_value, inplace=True, ascending=False)
all_hist = pd.DataFrame(
list(all_counts.items()), columns=["id", hist_value]
)
all_hist.set_index("id", inplace=True)
all_hist.sort_values(hist_value, inplace=True, ascending=False)
if min_id_freq:
any_hist = any_hist[any_hist[hist_value] > min_id_freq]
all_hist = all_hist[all_hist[hist_value] > min_id_freq]
if write_ids:
any_hist.to_csv(anyfilepath, sep="\t")
all_hist.to_csv(allfilepath, sep="\t")
#
# Compute cluster stats
#
# degree_sequence = sorted([d for n, d in cluster_graph.degree()], reverse=True)
# degreeCount = Counter(degree_sequence)
# degree_hist = pd.DataFrame(list(degreeCount.items()),
# columns=['degree', 'count'])
# degree_hist.set_index('degree', inplace=True)
# degree_hist.sort_values('degree', inplace=True)
# degree_hist.to_csv(histfilepath, sep='\t')
nx.write_gml(cluster_graph, gmlfilepath)
click_loguru.elapsed_time("final")
return run_stats, cluster_graph, cluster_hist, any_hist, all_hist
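# Illustrative call (file name and identity level are hypothetical; requires the usearch binary
# on SEARCH_PATHS, and click_loguru must supply the elapsed_time() hook used above):
#   stats, graph, hist, any_hist, all_hist = homology_cluster(
#       'glycine_max.faa', identity=0.984, click_loguru=cli_state)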
def cluster_in_steps(seqfile, steps, min_id_freq=0, substrs=None, dups=None):
"""Cluster in steps from low to 100% identity."""
try:
inpath, dirpath = get_paths_from_file(seqfile)
except FileNotFoundError:
logger.error('Input file "%s" does not exist!', seqfile)
sys.exit(1)
stat_path = dirpath / (inpath.stem + STATFILE_SUFFIX)
any_path = dirpath / (inpath.stem + ANYFILE_SUFFIX)
all_path = dirpath / (inpath.stem + ALLFILE_SUFFIX)
logsteps = [1.0] + list(
1.0 - np.logspace(IDENT_LOG_MIN, IDENT_LOG_MAX, num=steps)
)
min_fmt = prettyprint_float(min(logsteps) * 100.0, 2)
max_fmt = prettyprint_float(max(logsteps) * 100.0, 2)
logger.info(
f"Clustering at {steps} levels from {min_fmt}% to {max_fmt}% global"
" sequence identity"
)
stat_list = []
all_frames = []
any_frames = []
for id_level in logsteps:
(
stats,
unused_graph,
unused_hist,
any_,
all_,
) = homology_cluster( # pylint: disable=unused-variable
seqfile,
id_level,
min_id_freq=min_id_freq,
substrs=substrs,
dups=dups,
)
stat_list.append(stats)
any_frames.append(any_)
all_frames.append(all_)
logger.info(f"Collating results on {seqfile}.")
#
# Concatenate and write stats
#
stats = pd.DataFrame(stat_list)
stats.to_csv(stat_path, sep="\t")
#
# Concatenate any/all data
#
any_ = pd.concat(
any_frames, axis=1, join="inner", sort=True, ignore_index=False
)
any_.to_csv(any_path, sep="\t")
all_ = pd.concat(
all_frames, axis=1, join="inner", sort=True, ignore_index=False
)
all_.to_csv(all_path, sep="\t")
def clusters_to_histograms(infile):
"""Compute histograms from cluster file."""
try:
inpath, dirpath = get_paths_from_file(infile)
except FileNotFoundError:
logger.error(f'Input file "{infile}" does not exist!')
sys.exit(1)
histfilepath = dirpath / f"{inpath.stem}-sizedist.tsv"
    clusters = pd.read_csv(dirpath / infile, sep="\t", index_col=0)
import sys, os, socket
# set directories depending on machine
hostname = socket.gethostname()
if hostname=='tianx-pc':
homeDir = '/analyse/cdhome/'
projDir = '/analyse/Project0257/'
proj0012Dir = '/analyse/Project0012/'
elif hostname[0:7]=='deepnet':
homeDir = '/home/chrisd/'
projDir = '/analyse/Project0257/'
proj0012Dir = '/analyse/Project0012/chrisd/'
sys.path.append(os.path.abspath(homeDir+'dlfaceScripts/'))
import pandas as pd
import numpy as np
from resNetUtils import loadTrainData0th1st
destinDir = projDir+'tripletTxtLists/randAlloc/'
setList = ['train', 'val', 'test']
train_df, val_df, test_df, coltrain_df, colval_df, coltest_df, colleague0_df, colleague1_df = \
loadTrainData0th1st(projDir, costFunc='Multi')
for se in range(len(setList)):
if setList[se]=='train':
        thsDf = pd.concat([train_df, coltrain_df], ignore_index=True)
import numpy as np
import pandas as pd
import copy
from supervised.tuner.random_parameters import RandomParameters
from supervised.algorithms.registry import AlgorithmsRegistry
from supervised.tuner.preprocessing_tuner import PreprocessingTuner
from supervised.tuner.hill_climbing import HillClimbing
from supervised.algorithms.registry import (
BINARY_CLASSIFICATION,
MULTICLASS_CLASSIFICATION,
REGRESSION,
)
import logging
from supervised.utils.config import LOG_LEVEL
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
class MljarTuner:
def __init__(self, tuner_params, algorithms, ml_task, validation, seed):
logger.debug("MljarTuner.__init__")
self._start_random_models = tuner_params.get("start_random_models", 5)
self._hill_climbing_steps = tuner_params.get("hill_climbing_steps", 3)
self._top_models_to_improve = tuner_params.get("top_models_to_improve", 3)
self._algorithms = algorithms
self._ml_task = ml_task
self._validation = validation
self._seed = seed
self._unique_params_keys = []
def get_not_so_random_params(self, X, y):
models_cnt = 0
generated_params = []
for model_type in self._algorithms:
for i in range(self._start_random_models):
logger.info("Generate parameters for model #{0}".format(models_cnt + 1))
params = self._get_model_params(model_type, X, y, i + 1)
if params is None:
continue
params["name"] = f"model_{models_cnt + 1}"
unique_params_key = MljarTuner.get_params_key(params)
if unique_params_key not in self._unique_params_keys:
generated_params += [params]
self._unique_params_keys += [unique_params_key]
models_cnt += 1
return generated_params
def get_hill_climbing_params(self, current_models):
# second, hill climbing
for _ in range(self._hill_climbing_steps):
# get models orderer by loss
# TODO: refactor this callbacks.callbacks[0]
models = sorted(
[(m.callbacks.callbacks[0].final_loss, m) for m in current_models],
key=lambda x: x[0],
)
for i in range(min(self._top_models_to_improve, len(models))):
m = models[i][1]
for p in HillClimbing.get(
m.params.get("learner"),
self._ml_task,
len(current_models) + self._seed,
):
logger.info(
"Hill climbing step, for model #{0}".format(
len(current_models) + 1
)
)
if p is not None:
all_params = copy.deepcopy(m.params)
all_params["learner"] = p
all_params["name"] = f"model_{len(current_models) + 1}"
unique_params_key = MljarTuner.get_params_key(all_params)
if unique_params_key not in self._unique_params_keys:
self._unique_params_keys += [unique_params_key]
yield all_params
def _get_model_params(self, model_type, X, y, seed):
model_info = AlgorithmsRegistry.registry[self._ml_task][model_type]
model_params = RandomParameters.get(model_info["params"], seed + self._seed)
required_preprocessing = model_info["required_preprocessing"]
model_additional = model_info["additional"]
preprocessing_params = PreprocessingTuner.get(
required_preprocessing, {"train": {"X": X, "y": y}}, self._ml_task
)
model_params = {
"additional": model_additional,
"preprocessing": preprocessing_params,
"validation": self._validation,
"learner": {
"model_type": model_info["class"].algorithm_short_name,
"ml_task": self._ml_task,
**model_params,
},
}
num_class = (
            len(np.unique(y[~pd.isnull(y)]))
import pandas as pd
import numpy as np
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
sheet_names = pd.ExcelFile("BaseData.xlsx").sheet_names
sheet_name_main_L = [sheet_name for (i,sheet_name) in enumerate(sheet_names) if(i%4==2)]
sheet_name_results_L = [sheet_name for (i,sheet_name) in enumerate(sheet_names) if (i%4==3)]
weight_files_L = ["s2r2a-Weights.csv","s3r2a-Weights.csv","s4r2a-Weights.csv",
"s5r2a-Weights.csv","s7r2a-Weights.csv","s8r2a-Weights.csv"]
output_files_L = ["s2r2a.csv","s3r2a.csv","s4r2a.csv","s5r2a.csv","s7r2a.csv","s8r2a.csv"]
sia = SIA()
NAME_COL = [1,2]
NUMER_ORD = [3,4,5,6,7,8,9,10,11,12,13,15,16,17,18,19]
CAT_ORD = [14,27,29]
BIN_PRES = [20,21,22,23,24]
COMM = [25,26]
REL_COLS = []
REL_COLS.extend(NUMER_ORD)
REL_COLS.extend(CAT_ORD)
REL_COLS.extend(BIN_PRES)
REL_COLS.extend(COMM)
REL_COLS = sorted(REL_COLS)
def main():
for(sheet_name_main,sheet_name_result,output_file, weight_file) in \
zip(sheet_name_main_L,sheet_name_results_L,output_files_L, weight_files_L):
data = pd.read_excel("BaseData.xlsx",sheet_name=sheet_name_main)
new_header = data.iloc[0]
data = data[1:]
data.columns = new_header
data2 = data.iloc[:, REL_COLS]
data3 = data.iloc[:, NAME_COL]
for i in NUMER_ORD:
        MIN, MAX = data.iloc[:,i].min(), data.iloc[:,i].max()
data.iloc[:,i] = data.iloc[:,i].apply(lambda x: (x - MIN)/(MAX - MIN) )
for i in CAT_ORD:
if(i==14):
def helper_14(x):
if("somewhat" in str(x).lower()): return 0.5
elif("highly" in str(x).lower()): return 1.0
else: return 0.0
data.iloc[:,i] = data.iloc[:,i].apply(lambda x: helper_14(x))
if(i==27):
def helper_27(x):
if("can be there" in str(x).lower()): return 1.0
elif("no chance" in str(x).lower()): return 0.0
else: return 0.5
try: data.iloc[:,i] = data.iloc[:,i].apply(lambda x: helper_27(x))
except: continue
if(i==29):
def helper_29(x):
if("low" in str(x).lower()): return 0.0
elif("high" in str(x).lower()): return 1.0
else: return 0.5
data.iloc[:,i] = data.iloc[:,i].apply(lambda x: helper_29(x))
for i in BIN_PRES:
data.iloc[:,i] = data.iloc[:,i].apply(lambda x: int(pd.isna(x)))
for i in COMM:
def helper_COMM(x):
            if(pd.isna(x)):
"""
Preprocess sites scripts.
Written by <NAME>.
Winter 2020
"""
import os
import configparser
import json
import csv
import math
import glob
import pandas as pd
import geopandas as gpd
import pyproj
from shapely.geometry import Polygon, MultiPolygon, mapping, shape, MultiLineString, LineString
from shapely.ops import transform, unary_union, nearest_points
import fiona
from fiona.crs import from_epsg
import rasterio
from rasterio.mask import mask
from rasterstats import zonal_stats
import networkx as nx
from rtree import index
import numpy as np
import random
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), 'script_config.ini'))
BASE_PATH = CONFIG['file_locations']['base_path']
DATA_RAW = os.path.join(BASE_PATH, 'raw')
DATA_INTERMEDIATE = os.path.join(BASE_PATH, 'intermediate')
DATA_PROCESSED = os.path.join(BASE_PATH, 'processed')
def find_country_list(continent_list):
"""
This function produces country information by continent.
Parameters
----------
continent_list : list
Contains the name of the desired continent, e.g. ['Africa']
Returns
-------
countries : list of dicts
Contains all desired country information for countries in
the stated continent.
"""
glob_info_path = os.path.join(BASE_PATH, 'global_information.csv')
countries = pd.read_csv(glob_info_path, encoding = "ISO-8859-1")
countries = countries[countries.exclude != 1]
if len(continent_list) > 0:
data = countries.loc[countries['continent'].isin(continent_list)]
else:
data = countries
output = []
for index, country in data.iterrows():
output.append({
'country_name': country['country'],
'iso3': country['ISO_3digit'],
'iso2': country['ISO_2digit'],
'regional_level': country['lowest'],
'region': country['region']
})
return output
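# Illustrative usage (requires global_information.csv under BASE_PATH):
#   countries = find_country_list(['Africa'])
#   countries[0] might look like {'country_name': 'Angola', 'iso3': 'AGO', 'iso2': 'AO', ...}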
def process_coverage_shapes(country):
"""
Load in coverage maps, process and export for each country.
Parameters
----------
country : string
Three digit ISO country code.
"""
iso3 = country['iso3']
iso2 = country['iso2']
technologies = [
'GSM',
'3G',
'4G'
]
for tech in technologies:
folder_coverage = os.path.join(DATA_INTERMEDIATE, iso3, 'coverage')
filename = 'coverage_{}.shp'.format(tech)
path_output = os.path.join(folder_coverage, filename)
if os.path.exists(path_output):
continue
print('----')
print('Working on {} in {}'.format(tech, iso3))
filename = 'Inclusions_201812_{}.shp'.format(tech)
folder = os.path.join(DATA_RAW, 'mobile_coverage_explorer',
'Data_MCE')
inclusions = gpd.read_file(os.path.join(folder, filename))
        if iso2 in inclusions['CNTRY_ISO2'].values:
filename = 'MCE_201812_{}.shp'.format(tech)
folder = os.path.join(DATA_RAW, 'mobile_coverage_explorer',
'Data_MCE')
coverage = gpd.read_file(os.path.join(folder, filename))
coverage = coverage.loc[coverage['CNTRY_ISO3'] == iso3]
else:
filename = 'OCI_201812_{}.shp'.format(tech)
folder = os.path.join(DATA_RAW, 'mobile_coverage_explorer',
'Data_OCI')
coverage = gpd.read_file(os.path.join(folder, filename))
coverage = coverage.loc[coverage['CNTRY_ISO3'] == iso3]
if len(coverage) > 0:
print('Dissolving polygons')
coverage['dissolve'] = 1
coverage = coverage.dissolve(by='dissolve', aggfunc='sum')
coverage = coverage.to_crs('epsg:3857')
print('Excluding small shapes')
coverage['geometry'] = coverage.apply(clean_coverage,axis=1)
print('Removing empty and null geometries')
coverage = coverage[~(coverage['geometry'].is_empty)]
coverage = coverage[coverage['geometry'].notnull()]
print('Simplifying geometries')
coverage['geometry'] = coverage.simplify(
tolerance = 0.005,
preserve_topology=True).buffer(0.0001).simplify(
tolerance = 0.005,
preserve_topology=True
)
coverage = coverage.to_crs('epsg:4326')
if not os.path.exists(folder_coverage):
os.makedirs(folder_coverage)
coverage.to_file(path_output, driver='ESRI Shapefile')
return #print('Processed coverage shapes')
def process_regional_coverage(country):
"""
This functions estimates the area covered by each cellular
technology.
Parameters
----------
country : dict
Contains specific country parameters.
Returns
-------
output : dict
Results for cellular coverage by each technology for
each region.
"""
level = country['regional_level']
iso3 = country['iso3']
gid_level = 'GID_{}'.format(level)
filename = 'regions_{}_{}.shp'.format(level, iso3)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'regions')
path = os.path.join(folder, filename)
regions = gpd.read_file(path)
technologies = [
'GSM',
'3G',
'4G'
]
output = {}
for tech in technologies:
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'coverage')
path = os.path.join(folder, 'coverage_{}.shp'.format(tech))
if os.path.exists(path):
coverage = gpd.read_file(path, encoding="utf-8")
segments = gpd.overlay(regions, coverage, how='intersection')
tech_coverage = {}
for idx, region in segments.iterrows():
area_km2 = round(area_of_polygon(region['geometry']) / 1e6)
tech_coverage[region[gid_level]] = area_km2
output[tech] = tech_coverage
return output
def get_regional_data(country):
"""
Extract regional data including luminosity and population.
Parameters
----------
country : string
Three digit ISO country code.
"""
iso3 = country['iso3']
level = country['regional_level']
gid_level = 'GID_{}'.format(level)
path_output = os.path.join(DATA_INTERMEDIATE, iso3, 'regional_coverage.csv')
if os.path.exists(path_output):
return #print('Regional data already exists')
path_country = os.path.join(DATA_INTERMEDIATE, iso3,
'national_outline.shp')
coverage = process_regional_coverage(country)
single_country = gpd.read_file(path_country)
# print('----')
# print('working on {}'.format(iso3))
path_settlements = os.path.join(DATA_INTERMEDIATE, iso3,
'settlements.tif')
filename = 'regions_{}_{}.shp'.format(level, iso3)
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'regions')
path = os.path.join(folder, filename)
regions = gpd.read_file(path)
results = []
for index, region in regions.iterrows():
with rasterio.open(path_settlements) as src:
affine = src.transform
array = src.read(1)
array[array <= 0] = 0
population_summation = [d['sum'] for d in zonal_stats(
region['geometry'],
array,
stats=['sum'],
nodata=0,
affine=affine)][0]
area_km2 = round(area_of_polygon(region['geometry']) / 1e6)
if 'GSM' in [c for c in coverage.keys()]:
if region[gid_level] in coverage['GSM']:
coverage_GSM_km2 = coverage['GSM'][region[gid_level]]
else:
coverage_GSM_km2 = 0
else:
coverage_GSM_km2 = 0
if '3G' in [c for c in coverage.keys()]:
if region[gid_level] in coverage['3G']:
coverage_3G_km2 = coverage['3G'][region[gid_level]]
else:
coverage_3G_km2 = 0
else:
coverage_3G_km2 = 0
if '4G' in [c for c in coverage.keys()]:
if region[gid_level] in coverage['4G']:
coverage_4G_km2 = coverage['4G'][region[gid_level]]
else:
coverage_4G_km2 = 0
else:
coverage_4G_km2 = 0
results.append({
'GID_0': region['GID_0'],
'GID_id': region[gid_level],
'GID_level': gid_level,
# 'mean_luminosity_km2': luminosity_summation / area_km2 if luminosity_summation else 0,
'population': population_summation,
# 'pop_under_10_pop': pop_under_10_pop,
'area_km2': area_km2,
'population_km2': population_summation / area_km2 if population_summation else 0,
# 'pop_adults_km2': ((population_summation - pop_under_10_pop) /
# area_km2 if pop_under_10_pop else 0),
'coverage_GSM_percent': round(coverage_GSM_km2 / area_km2 * 100 if coverage_GSM_km2 else 0, 1),
'coverage_3G_percent': round(coverage_3G_km2 / area_km2 * 100 if coverage_3G_km2 else 0, 1),
'coverage_4G_percent': round(coverage_4G_km2 / area_km2 * 100 if coverage_4G_km2 else 0, 1),
})
# print('Working on backhaul')
backhaul_lut = estimate_backhaul(iso3, country['region'], '2025')
# print('Working on estimating sites')
results = estimate_sites(results, iso3, backhaul_lut)
results_df = pd.DataFrame(results)
results_df.to_csv(path_output, index=False)
# print('Completed {}'.format(single_country.NAME_0.values[0]))
return #print('Completed night lights data querying')
def find_pop_under_10(region, iso3):
"""
Find the estimated population under 10 years old.
Parameters
----------
region : pandas series
The region being modeled.
iso3 : string
ISO3 country code.
Returns
-------
population : int
Population sum under 10 years of age.
"""
path = os.path.join(DATA_INTERMEDIATE, iso3, 'under_10')
all_paths = glob.glob(path + '/*.tif')
population = []
for path in all_paths:
with rasterio.open(path) as src:
affine = src.transform
array = src.read(1)
array[array <= 0] = 0
population_summation = [d['sum'] for d in zonal_stats(
region['geometry'],
array,
stats=['sum'],
nodata=0,
affine=affine)][0]
if population_summation is not None:
population.append(population_summation)
return sum(population)
def estimate_sites(data, iso3, backhaul_lut):
"""
Estimate the sites by region.
Parameters
----------
data : dataframe
Pandas df with regional data.
iso3 : string
ISO3 country code.
backhaul_lut : dict
Lookup table of backhaul composition.
Returns
-------
output : list of dicts
All regional data with estimated sites.
"""
output = []
existing_site_data_path = os.path.join(DATA_INTERMEDIATE, iso3, 'sites', 'sites.csv')
existing_site_data = {}
if os.path.exists(existing_site_data_path):
site_data = pd.read_csv(existing_site_data_path)
site_data = site_data.to_dict('records')
for item in site_data:
existing_site_data[item['GID_id']] = item['sites']
population = 0
for region in data:
if region['population'] == None:
continue
population += int(region['population'])
path = os.path.join(DATA_RAW, 'wb_mobile_coverage', 'wb_population_coverage_2G.csv')
coverage = pd.read_csv(path, encoding='latin-1')
coverage = coverage.loc[coverage['Country ISO3'] == iso3]
if len(coverage) > 1:
coverage = coverage['2020'].values[0]
else:
coverage = 0
population_covered = population * (coverage / 100)
path = os.path.join(DATA_RAW, 'real_site_data', 'site_counts.csv')
towers = pd.read_csv(path, encoding = "ISO-8859-1")
towers = towers.loc[towers['iso3'] == iso3]
towers = towers['sites'].values[0]
if np.isnan(towers):
towers = 0
towers_per_pop = 0
else:
towers_per_pop = towers / population_covered
tower_backhaul_lut = estimate_backhaul_type(backhaul_lut)
data = sorted(data, key=lambda k: k['population_km2'], reverse=True)
covered_pop_so_far = 0
for region in data:
#first try to use actual data
if len(existing_site_data) > 0:
sites_estimated_total = existing_site_data[region['GID_id']]
if region['area_km2'] > 0:
sites_estimated_km2 = sites_estimated_total / region['area_km2']
else:
sites_estimated_km2 = 0
#or if we don't have data estimates of sites per area
else:
if covered_pop_so_far < population_covered:
sites_estimated_total = region['population'] * towers_per_pop
sites_estimated_km2 = region['population_km2'] * towers_per_pop
else:
sites_estimated_total = 0
sites_estimated_km2 = 0
backhaul_fiber = 0
backhaul_copper = 0
backhaul_wireless = 0
backhaul_satellite = 0
for i in range(1, int(round(sites_estimated_total)) + 1):
num = random.uniform(0, 1)
if num <= tower_backhaul_lut['fiber']:
backhaul_fiber += 1
elif tower_backhaul_lut['fiber'] < num <= tower_backhaul_lut['copper']:
backhaul_copper += 1
elif tower_backhaul_lut['copper'] < num <= tower_backhaul_lut['microwave']:
backhaul_wireless += 1
elif tower_backhaul_lut['microwave'] < num:
backhaul_satellite += 1
output.append({
'GID_0': region['GID_0'],
'GID_id': region['GID_id'],
'GID_level': region['GID_level'],
# 'mean_luminosity_km2': region['mean_luminosity_km2'],
'population': region['population'],
# 'pop_under_10_pop': region['pop_under_10_pop'],
'area_km2': region['area_km2'],
'population_km2': region['population_km2'],
# 'pop_adults_km2': region['pop_adults_km2'],
'coverage_GSM_percent': region['coverage_GSM_percent'],
'coverage_3G_percent': region['coverage_3G_percent'],
'coverage_4G_percent': region['coverage_4G_percent'],
'total_estimated_sites': sites_estimated_total,
'total_estimated_sites_km2': sites_estimated_km2,
'sites_3G': sites_estimated_total * (region['coverage_3G_percent'] /100),
'sites_4G': sites_estimated_total * (region['coverage_4G_percent'] /100),
'backhaul_fiber': backhaul_fiber,
'backhaul_copper': backhaul_copper,
'backhaul_wireless': backhaul_wireless,
'backhaul_satellite': backhaul_satellite,
})
if region['population'] == None:
continue
covered_pop_so_far += region['population']
return output
def estimate_backhaul(iso3, region, year):
"""
Get the correct backhaul composition for the region.
Parameters
----------
iso3 : string
ISO3 country code.
region : string
The continent the country is part of.
year : int
The year of the backhaul composition desired.
Returns
-------
output : list of dicts
All regional data with estimated sites.
"""
output = []
path = os.path.join(BASE_PATH, 'raw', 'gsma', 'backhaul.csv')
backhaul_lut = pd.read_csv(path)
backhaul_lut = backhaul_lut.to_dict('records')
for item in backhaul_lut:
if region == item['Region'] and int(item['Year']) == int(year):
output.append({
'tech': item['Technology'],
'percentage': int(item['Value']),
})
return output
def estimate_backhaul_type(backhaul_lut):
"""
Process the tower backhaul lut.
Parameters
----------
backhaul_lut : dict
Lookup table of backhaul composition.
Returns
-------
output : dict
Tower backhaul lookup table.
"""
output = {}
preference = [
'fiber',
'copper',
'microwave',
'satellite'
]
perc_so_far = 0
for tech in preference:
for item in backhaul_lut:
if tech == item['tech'].lower():
perc = item['percentage']
output[tech] = (perc + perc_so_far) / 100
perc_so_far += perc
return output
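# Example of the cumulative shares produced (input percentages are hypothetical):
#   [{'tech': 'Fiber', 'percentage': 20}, {'tech': 'Copper', 'percentage': 10},
#    {'tech': 'Microwave', 'percentage': 60}, {'tech': 'Satellite', 'percentage': 10}]
#   -> {'fiber': 0.2, 'copper': 0.3, 'microwave': 0.9, 'satellite': 1.0}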
def area_of_polygon(geom):
"""
Returns the area of a polygon. Assume WGS84 before converting
to projected crs.
Parameters
----------
geom : shapely geometry
A shapely geometry object.
Returns
-------
poly_area : int
Area of polygon in square kilometers.
"""
geod = pyproj.Geod(ellps="WGS84")
poly_area, poly_perimeter = geod.geometry_area_perimeter(
geom
)
return abs(int(poly_area))
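# Rough sanity check: a 1-degree x 1-degree box on the equator,
# Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]), yields roughly 1.23e10 m^2 (~12,300 km^2).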
def length_of_line(geom):
"""
Returns the length of a linestring. Assume WGS84 as crs.
Parameters
----------
geom : shapely geometry
A shapely geometry object.
Returns
-------
total_length : int
Length of the linestring given in kilometers.
"""
geod = pyproj.Geod(ellps="WGS84")
total_length = geod.line_length(*geom.xy)
return abs(int(total_length))
def estimate_numers_of_sites(linear_regressor, x_value):
"""
Function to predict the y value from the stated x value.
Parameters
----------
linear_regressor : object
Linear regression object.
x_value : float
The stated x value we want to use to predict y.
Returns
-------
result : float
The predicted y value.
"""
if not x_value == 0:
result = linear_regressor.predict(x_value)
result = result[0,0]
else:
result = 0
return result
def exclude_small_shapes(x):
"""
Remove small multipolygon shapes.
Parameters
---------
x : polygon
Feature to simplify.
Returns
-------
MultiPolygon : MultiPolygon
Shapely MultiPolygon geometry without tiny shapes.
"""
# if its a single polygon, just return the polygon geometry
if x.geometry.geom_type == 'Polygon':
return x.geometry
    # if it's a multipolygon, we start trying to simplify it
    # and drop the small member shapes when the geometry is big.
elif x.geometry.geom_type == 'MultiPolygon':
area1 = 0.01
area2 = 50
        # don't remove shapes if the total area is already very small
if x.geometry.area < area1:
return x.geometry
# remove bigger shapes if country is really big
if x['GID_0'] in ['CHL','IDN']:
threshold = 0.01
elif x['GID_0'] in ['RUS','GRL','CAN','USA']:
threshold = 0.01
elif x.geometry.area > area2:
threshold = 0.1
else:
threshold = 0.001
# save remaining polygons as new multipolygon for
# the specific country
new_geom = []
for y in x.geometry:
if y.area > threshold:
new_geom.append(y)
return MultiPolygon(new_geom)
def clean_coverage(x):
"""
Cleans the coverage polygons by remove small multipolygon shapes.
Parameters
---------
x : polygon
Feature to simplify.
Returns
-------
MultiPolygon : MultiPolygon
Shapely MultiPolygon geometry without tiny shapes.
"""
# if its a single polygon, just return the polygon geometry
if x.geometry.geom_type == 'Polygon':
if x.geometry.area > 1e7:
return x.geometry
    # if it's a multipolygon, we start trying to simplify it and
    # drop the small member shapes when the geometry is big.
elif x.geometry.geom_type == 'MultiPolygon':
threshold = 1e7
# save remaining polygons as new multipolygon for
# the specific country
new_geom = []
for y in x.geometry:
if y.area > threshold:
new_geom.append(y)
return MultiPolygon(new_geom)
def estimate_core_nodes(iso3, pop_density_km2, settlement_size):
"""
This function identifies settlements which exceed a desired settlement
size. It is assumed fiber exists at settlements over, for example,
20,000 inhabitants.
Parameters
----------
iso3 : string
ISO 3 digit country code.
pop_density_km2 : int
Population density threshold for identifying built up areas.
settlement_size : int
        Overall settlement size assumption, e.g. 20,000 inhabitants.
Returns
-------
output : list of dicts
Identified major settlements as Geojson objects.
"""
path = os.path.join(DATA_INTERMEDIATE, iso3, 'settlements.tif')
with rasterio.open(path) as src:
data = src.read()
threshold = pop_density_km2
data[data < threshold] = 0
data[data >= threshold] = 1
polygons = rasterio.features.shapes(data, transform=src.transform)
shapes_df = gpd.GeoDataFrame.from_features(
[
{'geometry': poly, 'properties':{'value':value}}
for poly, value in polygons
if value > 0
],
crs='epsg:4326'
)
stats = zonal_stats(shapes_df['geometry'], path, stats=['count', 'sum'])
stats_df = pd.DataFrame(stats)
    nodes = pd.concat([shapes_df, stats_df], axis=1)
# <NAME>
# <EMAIL>
import pandas as pd
def concat_tablelist(table1=None,table2=None):
if table1 is None or table2 is None:
if table1 is not None:
return table1
else:
return table2
    return pd.concat([table1, table2], ignore_index=True)
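# Hedged usage sketch (comments only; the frames are illustrative):
#   a = pd.DataFrame({'x': [1]}); b = pd.DataFrame({'x': [2]})
#   concat_tablelist(a, b)        # rows stacked, index renumbered 0..1
#   concat_tablelist(table1=a)    # one side missing -> returns `a` unchanged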
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# ----------------------------------------
# Sources for electricity prices:
# ----------------------------------------
# site: https://dataminer2.pjm.com/list
# title: Data Miner 2
# institution: PJM
# accessed: 1/21/2020
#
# 2019_gen_by_fuel.csv
# Generation by Fuel Type
# Zone: PJM aggregate
# dates: 1/1/2019 - 1/1/2020
#
# 2019_rt_hrl_lmps.csv
# Real-Time Hourly LMPs (Locational Marginal Pricing)
# Zone: PJM-RTO (PJM aggregate)
# dates: 1/1/2019 - 1/1/2020
# ----------------------------------------
# inputs
# ----------------------------------------
data_folders = ['2015', '2015',
'2017', '2017',
'2019', '2019']
price_files = ['da_hrl_lmps_DOM_15.csv', 'rt_hrl_lmps_DOM_15.csv',
'da_hrl_lmps_DOM_17.csv', 'rt_hrl_lmps_DOM_17.csv',
'da_hrl_lmps_DOM_19.csv', 'rt_hrl_lmps_DOM_19.csv']
generation_by_fuel_files = ['2015_gen_by_fuel.csv', '2015_gen_by_fuel.csv',
'2017_gen_by_fuel.csv', '2017_gen_by_fuel.csv',
'2019_gen_by_fuel.csv', '2019_gen_by_fuel.csv']
wind_speed_files = ['Clean_1yr_90m_Windspeeds.txt', 'Clean_1yr_90m_Windspeeds.txt',
'Clean_1yr_90m_Windspeeds.txt', 'Clean_1yr_90m_Windspeeds.txt',
'Clean_1yr_90m_Windspeeds.txt', 'Clean_1yr_90m_Windspeeds.txt']
output_filenames = ['da_timeseries_inputs_2015.csv', 'rt_timeseries_inputs_2015.csv',
'da_timeseries_inputs_2017.csv', 'rt_timeseries_inputs_2017.csv',
'da_timeseries_inputs_2019.csv', 'rt_timeseries_inputs_2019.csv']
# option to output individual column data (good for troubleshooting)
write_all_data = False
# ----------------------------------------
# begin script
# ----------------------------------------
# create a dataframe to combine all results together in to a single dataframe for multi-year simulations
df_comb = pd.DataFrame()
wrk_dir = os.getcwd()
for data_folder, price_file, generation_by_fuel_file, wind_speed_file, output_filename in \
zip(data_folders, price_files, generation_by_fuel_files, wind_speed_files, output_filenames):
# ----------------------------------------
# begin processing
# ----------------------------------------
os.chdir(data_folder)
# ----------------------------------------
# Emission factors (kg CO2/million btu)
# ----------------------------------------
# Source: Table A.3. Carbon Dioxide Uncontrolled Emission Factors
# https://www.eia.gov/electricity/annual/html/epa_a_03.html
# accessed 1/21/2020
# Coal - Bituminous coal, Gas - Natural gas, Multiple Fuels - Residual fuel oil, Oil - Residual fuel oil
emission_factors = {'Coal': 93.3, 'Flywheel': 0.0, 'Gas': 53.07, 'Hydro': 0.0, 'Multiple Fuels': 78.79,
'Nuclear': 0.0, 'Oil': 78.79, 'Other': 0.0, 'Other Renewables': 0.0, 'Solar': 0.0,
'Storage': 0.0, 'Wind': 0.0}
# ----------------------------------------
# Heat rate (btu / kWh) - 2018
# ----------------------------------------
# Source: Table 8.2. Average Tested Heat Rates by Prime Mover and Energy Source, 2008 - 2018
# https://www.eia.gov/electricity/annual/html/epa_08_02.html
# accessed 1/21/2020
# Coal - Coal + Steam Generator, Gas - Natural gas + Gas Turbine,
# Multiple Fuels - Petroleum + Gas Turbine, Oil - Petroleum + Gas Turbine
heat_rates = {'Coal': 10015, 'Flywheel': 0.0, 'Gas': 11138, 'Hydro': 0.0, 'Multiple Fuels': 13352.0,
'Nuclear': 0.0, 'Oil': 13352.0, 'Other': 0.0, 'Other Renewables': 0.0, 'Solar': 0.0,
'Storage': 0.0, 'Wind': 0.0}
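    # Worked unit check (an assumption about how these constants are combined
    # further down this script): heat_rate is btu/kWh and emission_factors are
    # kg CO2 per million btu, so per-unit emissions for a fuel are
    #   kg CO2 per kWh = heat_rate * emission_factor / 1e6
    # e.g. Gas: 11138 * 53.07 / 1e6 ~= 0.59 kg CO2/kWh (about 591 kg/MWh).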
# ----------------------------------------
# Pricing
# ----------------------------------------
df_price = pd.read_csv(price_file) # $/MWh
# set index
df_price.datetime_beginning_ept = pd.to_datetime(df_price.datetime_beginning_ept)
df_price = df_price.set_index('datetime_beginning_ept')
price_type = price_file[:2]
if price_type == 'da':
# create columns to store generation, VRE and emissions
df_price['price_dollarsPerMWh'] = df_price.loc[:, 'total_lmp_da']
# drop columns that we don't need
df_price = df_price.drop(
columns=['system_energy_price_da', 'datetime_beginning_utc', 'pnode_id', 'pnode_name', 'voltage',
'equipment',
'type', 'zone', 'total_lmp_da',
'congestion_price_da', 'marginal_loss_price_da', 'row_is_current', 'version_nbr'])
elif price_type == 'rt':
# create columns to store generation, VRE and emissions
df_price['price_dollarsPerMWh'] = df_price.loc[:, 'total_lmp_rt']
# drop columns that we don't need
df_price = df_price.drop(
columns=['system_energy_price_rt', 'datetime_beginning_utc', 'pnode_id', 'pnode_name', 'voltage',
'equipment',
'type', 'zone', 'total_lmp_rt',
'congestion_price_rt', 'marginal_loss_price_rt', 'row_is_current', 'version_nbr'])
else:
print('Warning - price file type not recognized')
print('file should start with da or rt, for day ahead or real time pricing')
# save and plot
if write_all_data:
df_price.to_csv('price.csv')
df_price.plot()
plt.tight_layout()
plt.savefig('price.png')
# ----------------------------------------
# Generation by fuel ->
# total generation
# emissions
# generation by VRE (variable renewable energy)
# ----------------------------------------
    df_gen = pd.read_csv(generation_by_fuel_file)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/6/16 15:28
Desc: Eastmoney Data Center - Featured Data - Qian Gu Qian Ping (per-stock comments/ratings)
http://data.eastmoney.com/stockcomment/
"""
from datetime import datetime
import pandas as pd
import requests
from tqdm import tqdm
def stock_comment_em() -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - Qian Gu Qian Ping (per-stock comments/ratings)
    http://data.eastmoney.com/stockcomment/
    :return: per-stock comment/rating data
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "SECURITY_CODE",
"sortTypes": "1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_DMSK_TS_STOCKNEW",
"quoteColumns": "f2~01~SECURITY_CODE~CLOSE_PRICE,f8~01~SECURITY_CODE~TURNOVERRATE,f3~01~SECURITY_CODE~CHANGE_RATE,f9~01~SECURITY_CODE~PE_DYNAMIC",
"columns": "ALL",
"filter": "",
"token": "<KEY>",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"-",
"代码",
"-",
"交易日",
"名称",
"-",
"-",
"-",
"最新价",
"涨跌幅",
"-",
"换手率",
"主力成本",
"市盈率",
"-",
"-",
"机构参与度",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
"综合得分",
"上升",
"目前排名",
"关注指数",
"-",
]
big_df = big_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"换手率",
"市盈率",
"主力成本",
"机构参与度",
"综合得分",
"上升",
"目前排名",
"关注指数",
"交易日",
]
]
big_df["最新价"] = pd.to_numeric(big_df["最新价"], errors="coerce")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"], errors="coerce")
big_df["换手率"] = pd.to_numeric(big_df["换手率"], errors="coerce")
big_df["市盈率"] = pd.to_numeric(big_df["市盈率"], errors="coerce")
big_df["主力成本"] = pd.to_numeric(big_df["主力成本"], errors="coerce")
big_df["机构参与度"] = pd.to_numeric(big_df["机构参与度"], errors="coerce")
big_df["综合得分"] = pd.to_numeric(big_df["综合得分"], errors="coerce")
big_df["上升"] = pd.to_numeric(big_df["上升"], errors="coerce")
big_df["目前排名"] = pd.to_numeric(big_df["目前排名"], errors="coerce")
big_df["关注指数"] = pd.to_numeric(big_df["关注指数"], errors="coerce")
big_df["交易日"] = pd.to_datetime(big_df["交易日"]).dt.date
return big_df
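# Hedged usage sketch (comments only; the call hits the live Eastmoney API and
# pages through every result, so it can take a while):
#   df = stock_comment_em()
#   df.head()   # one row per stock: price, score, rank, attention index, date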
def stock_comment_detail_zlkp_jgcyd_em(symbol: str = "600000") -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - Qian Gu Qian Ping - main-force control - institutional participation
    https://data.eastmoney.com/stockcomment/stock/600000.html
    :param symbol: stock code
    :type symbol: str
    :return: main-force control - institutional participation
:rtype: pandas.DataFrame
"""
url = f"https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"reportName": "RPT_DMSK_TS_STOCKEVALUATE",
"filter": f'(SECURITY_CODE="{symbol}")',
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"sortColumns": "TRADE_DATE",
"sortTypes": "-1",
"_": "1655387358195",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
temp_df = temp_df[["TRADE_DATE", "ORG_PARTICIPATE"]]
temp_df.columns = ["date", "value"]
temp_df["date"] = pd.to_datetime(temp_df["date"]).dt.date
temp_df.sort_values(["date"], inplace=True)
temp_df.reset_index(inplace=True, drop=True)
temp_df["value"] = pd.to_numeric(temp_df["value"]) * 100
return temp_df
def stock_comment_detail_zhpj_lspf_em(symbol: str = "600000") -> pd.DataFrame:
"""
    Eastmoney Data Center - Featured Data - Qian Gu Qian Ping - overall rating - historical score
    https://data.eastmoney.com/stockcomment/stock/600000.html
    :param symbol: stock code
    :type symbol: str
    :return: overall rating - historical score
:rtype: pandas.DataFrame
"""
url = f"https://data.eastmoney.com/stockcomment/api/{symbol}.json"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(
[
data_json["ApiResults"]["zhpj"]["HistoryScore"]["XData"],
data_json["ApiResults"]["zhpj"]["HistoryScore"]["Ydata"]["Score"],
data_json["ApiResults"]["zhpj"]["HistoryScore"]["Ydata"]["Price"],
]
).T
temp_df.columns = ["日期", "评分", "股价"]
temp_df["日期"] = str(datetime.now().year) + "-" + temp_df["日期"]
temp_df["日期"] = pd.to_datetime(temp_df["日期"]).dt.date
temp_df.sort_values(["日期"], inplace=True)
temp_df.reset_index(inplace=True, drop=True)
temp_df["评分"] = pd.to_numeric(temp_df["评分"])
temp_df["股价"] = pd.t | o_numeric(temp_df["股价"]) | pandas.to_numeric |
# utils contains the variables or functions that would be used across several notebooks
# These are saved and updated here. The notebooks could then be rerun without too many manual changes.
# Convert square foot into acres
feet_in_acres = 43560
"""
Define functions to upload to GitHub
Based on https://github.com/CityOfLosAngeles/aqueduct/tree/master/civis-aqueduct-utils/civis_aqueduct_utils
"""
import base64
import os
import fsspec
import requests
from arcgis.gis import GIS
from arcgis.features import FeatureLayerCollection
# Function to overwrite file in GitHub
DEFAULT_COMMITTER = {
"name": "Service User",
"email": "<EMAIL>",
}
def upload_file_to_github(
token,
repo,
branch,
path,
local_file_path,
commit_message,
committer=DEFAULT_COMMITTER,
):
"""
Parameters
----------
token: str
GitHub personal access token and corresponds to GITHUB_TOKEN
in Civis credentials.
repo: str
Repo name, such as 'CityofLosAngeles/covid19-indicators`
branch: str
Branch name, such as 'master'
path: str
Path to the file within the repo.
local_file_path: str
Path to the local file to be uploaded to the repo, which can differ
from the path within the GitHub repo.
commit_message: str
Commit message used when making the git commit.
commiter: dict
name and email associated with the committer.
Defaults to ITA robot user, if another committer is not provided..
"""
BASE = "https://api.github.com"
# Get the sha of the previous version.
# Operate on the dirname rather than the path itself so we
# don't run into file size limitations.
r = requests.get(
f"{BASE}/repos/{repo}/contents/{os.path.dirname(path)}",
params={"ref": branch},
headers={"Authorization": f"token {token}"},
)
r.raise_for_status()
item = next(i for i in r.json() if i["path"] == path)
sha = item["sha"]
# Upload the new version
with fsspec.open(local_file_path, "rb") as f:
contents = f.read()
r = requests.put(
f"{BASE}/repos/{repo}/contents/{path}",
headers={"Authorization": f"token {token}"},
json={
"message": commit_message,
"committer": committer,
"branch": branch,
"sha": sha,
"content": base64.b64encode(contents).decode("utf-8"),
},
)
r.raise_for_status()
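# Hedged usage sketch (comments only; token, repo and paths are placeholders,
# not values from this project):
#   upload_file_to_github(
#       token=os.environ["GITHUB_TOKEN"],
#       repo="someorg/somerepo",
#       branch="master",
#       path="data/output.csv",
#       local_file_path="./output.csv",
#       commit_message="Update output.csv",
#   )
# Because the helper looks up the existing file's SHA first, `path` must
# already exist on the target branch.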
"""
Uploading to ArcGIS Online
"""
# Overwrite ESRI layer
def update_geohub_layer(geohubUrl, user, pw, layer, update_data):
"""
user: str, ESRI username
pw: str, ESRI password
layer: str, ESRI feature layer ID
update_data: path to local CSV data used to overwrite feature layer
ex: "./file_name.csv"
"""
#geohub = GIS('https://lahub.maps.arcgis.com', user, pw)
geohub = GIS(geohubUrl, user, pw)
flayer = geohub.content.get(layer)
flayer_collection = FeatureLayerCollection.fromitem(flayer)
flayer_collection.manager.overwrite(update_data)
print("Successfully updated AGOL")
# Function to update feature layer, line by line
def chunks(list_of_features, n, agol_layer):
"""
Yield successive n-sized chunks from list_of_features.
list_of_features: list. List of features to be updated.
n: numeric. chunk size, 1000 is the max for AGOL feature layer
agol_layer: AGOL layer.
Ex:
flayer = gis.content.get(feature_layer_id)
agol_layer = flayer.layers[0]
"""
for i in range(0, len(list_of_features), n):
chunk_list=list_of_features[i:i + n]
agol_layer.edit_features(updates=chunk_list)
print("update successful")
"""
Functions to push datasets to Socrata open data portal.
"""
import os
import pandas as pd
import sodapy
def overwrite_socrata_table(username, password,
csv_file, socrata_dataset_id, NUM_MINUTES=20):
"""
username: str, Socrata username
password: str, <PASSWORD>
csv_file: str, path to local CSV file used to overwrite Socrata table
socrata_dataset_id: str, unique dataset ID for Socrata table
NUM_MINUTES: int, number of minutes for the timeout
"""
client = sodapy.Socrata("data.lacity.org",
app_token = None,
username = username,
password = password)
data = open(f"{csv_file}")
client.timeout = (NUM_MINUTES * 60)
client.replace(socrata_dataset_id, data)
print(f"{csv_file} updated")
os.remove(f"{csv_file}")
def upsert_socrata_rows(username, password,
csv_file, socrata_dataset_id, NUM_MINUTES=5):
"""
username: str, Socrata username
password: str, <PASSWORD>
csv_file: str, path to local CSV file used to overwrite Socrata table
socrata_dataset_id: str, unique dataset ID for Socrata table
NUM_MINUTES: int, number of minutes for the timeout
"""
client = sodapy.Socrata("data.lacity.org",
app_token = None,
username = username,
password = password)
# Grab existing table in Socrata and find where it leaves off
existing_table = client.get(socrata_dataset_id)
existing_table = pd.DataFrame(existing_table)
max_date = pd.to_datetime(existing_table.date.max())
df = pd.read_csv(f"{csv_file}")
df = df.assign(
        date=pd.to_datetime(df.date)
    )
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp(
'2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00',
tz=tz)])
self.assert_series_equal(expected, result)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2013-01-01'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp(
'2013-01-01')])
self.assert_series_equal(expected, result)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(value=5), exp)
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
self.assertIn('ffil', str(inst))
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
from pandas import tslib
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
self.assertEqual(td1[0].value, tslib.iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = tslib.iNaT
self.assertTrue(isnull(td1[1]))
self.assertEqual(td1[1].value, tslib.iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = tslib.NaT
self.assertTrue(isnull(td1[2]))
self.assertEqual(td1[2].value, tslib.iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# self.assertEqual(isnull(result).sum(), 7)
    # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
self.assertEqual(len(s.dropna()), 0)
s.dropna(inplace=True)
self.assertEqual(len(s), 0)
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
self.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
self.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
self.assert_series_equal(result, s)
self.assertFalse(result is s)
s2 = s.copy()
s2.dropna(inplace=True)
self.assert_series_equal(s2, s)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notnull(ts)])
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.isnull(),
Series([False, False, False, True, False]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.isnull(), Series([False, False, True]).values)
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.notnull(),
Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notnull(), Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
self.assertEqual(result.name, self.ts.name)
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
self.assertEqual(ts.name, name)
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
class TestSeriesInterpolateData(TestData, tm.TestCase):
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_pchip(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_akima(self):
tm._skip_if_no_scipy()
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_from_derivatives(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
        expected = Series([1., 3., 6.8, 12., 18.2, 25.])
import unittest
import dolphindb as ddb
import dolphindb.settings as keys
import numpy as np
from numpy.testing import *
from setup import HOST, PORT, WORK_DIR
import pandas as pd
class DBInfo:
dfsDBName = 'dfs://testDatabase'
diskDBName = WORK_DIR + '/testDatabase'
def existsDB(dbName):
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
return s.run("existsDatabase('{db}')".format(db=dbName))
def dropDB(dbName):
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
s.run("dropDatabase('{db}')".format(db=dbName))
class DatabaseTest(unittest.TestCase):
@classmethod
def setUp(cls):
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
dbPaths = [DBInfo.dfsDBName, DBInfo.diskDBName]
for dbPath in dbPaths:
script = """
if(existsDatabase('{dbPath}'))
dropDatabase('{dbPath}')
if(exists('{dbPath}'))
rmdir('{dbPath}', true)
""".format(dbPath=dbPath)
cls.s.run(script)
@classmethod
def tearDown(cls):
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
dbPaths = [DBInfo.dfsDBName, DBInfo.diskDBName]
for dbPath in dbPaths:
script = """
if(existsDatabase('{dbPath}'))
dropDatabase('{dbPath}')
if(exists('{dbPath}'))
rmdir('{dbPath}', true)
""".format(dbPath=dbPath)
cls.s.run(script)
def test_create_dfs_database_range_partition(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
db = self.s.database('db', partitionType=keys.RANGE, partitions=[1, 11, 21], dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionSchema': np.array([1, 11, 21], dtype=np.int32),
'partitionSites': None,
'partitionTypeName':'RANGE',
'partitionType': 2}
re = self.s.run("schema(db)")
self.assertEqual(re['databaseDir'], dct['databaseDir'])
assert_array_equal(re['partitionSchema'], dct['partitionSchema'])
self.assertEqual(re['partitionSites'], dct['partitionSites'])
df = pd.DataFrame({'id': np.arange(1, 21), 'val': np.repeat(1, 20)})
t = self.s.table(data=df, tableAliasName='t')
db.createPartitionedTable(table=t, tableName='pt', partitionColumns='id').append(t)
re = self.s.loadTable(tableName='pt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['id'], np.arange(1, 21))
assert_array_equal(re['val'], np.repeat(1, 20))
db.createTable(table=t, tableName='dt').append(t)
re = self.s.loadTable(tableName='dt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['id'], np.arange(1, 21))
assert_array_equal(re['val'], np.repeat(1, 20))
def test_create_dfs_database_hash_partition(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
db = self.s.database('db', partitionType=keys.HASH, partitions=[keys.DT_INT, 2], dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionSchema': 2,
'partitionSites': None,
'partitionTypeName':'HASH',
'partitionType': 5}
re = self.s.run("schema(db)")
self.assertEqual(re['databaseDir'], dct['databaseDir'])
self.assertEqual(re['partitionSchema'], dct['partitionSchema'])
self.assertEqual(re['partitionSites'], dct['partitionSites'])
df = pd.DataFrame({'id':[1,2,3,4,5], 'val':[10, 20, 30, 40, 50]})
t = self.s.table(data=df)
pt = db.createPartitionedTable(table=t, tableName='pt', partitionColumns='id')
pt.append(t)
re = self.s.loadTable(tableName='pt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(np.sort(re['id']), df['id'])
assert_array_equal(np.sort(re['val']), df['val'])
dt = db.createTable(table=t, tableName='dt')
dt.append(t)
re = self.s.loadTable(tableName='pt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(np.sort(re['id']), df['id'])
assert_array_equal(np.sort(re['val']), df['val'])
def test_create_dfs_database_value_partition(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
db = self.s.database('db', partitionType=keys.VALUE, partitions=[1, 2, 3], dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionSchema': np.array([3, 1, 2], dtype=np.int32),
'partitionSites': None,
'partitionTypeName':'VALUE',
'partitionType': 1}
re = self.s.run("schema(db)")
self.assertEqual(re['databaseDir'], dct['databaseDir'])
assert_array_equal(re['partitionSchema'], dct['partitionSchema'])
self.assertEqual(re['partitionSites'], dct['partitionSites'])
df = pd.DataFrame({'id':[1, 2, 3, 1, 2, 3], 'val':[11, 12, 13, 14, 15, 16]})
t = self.s.table(data=df)
pt = db.createPartitionedTable(table=t, tableName='pt', partitionColumns='id').append(t)
re = self.s.loadTable(tableName='pt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(np.sort(df['id']), np.sort(re['id']))
assert_array_equal(np.sort(df['val']), np.sort(re['val']))
dt = db.createTable(table=t, tableName='dt').append(t)
re = self.s.loadTable(tableName='dt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(np.sort(df['id']), np.sort(re['id']))
assert_array_equal(np.sort(df['val']), np.sort(re['val']))
def test_create_dfs_database_list_partition(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
db = self.s.database('db', partitionType=keys.LIST, partitions=[['IBM', 'ORCL', 'MSFT'], ['GOOG', 'FB']],
dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionSchema': np.array([np.array(['IBM', 'ORCL', 'MSFT']), np.array(['GOOG', 'FB'])]),
'partitionSites': None,
'partitionTypeName':'LIST',
'partitionType': 3}
re = self.s.run("schema(db)")
self.assertEqual(re['databaseDir'], dct['databaseDir'])
assert_array_equal(re['partitionSchema'][0], dct['partitionSchema'][0])
assert_array_equal(re['partitionSchema'][1], dct['partitionSchema'][1])
self.assertEqual(re['partitionSites'], dct['partitionSites'])
df = pd.DataFrame({'sym':['IBM', 'ORCL', 'MSFT', 'GOOG', 'FB'], 'val':[1,2,3,4,5]})
t = self.s.table(data=df)
db.createPartitionedTable(table=t, tableName='pt', partitionColumns='sym').append(t)
re = self.s.loadTable(tableName='pt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['sym'], df['sym'])
assert_array_equal(re['val'], df['val'])
db.createTable(table=t, tableName='dt').append(t)
re = self.s.loadTable(tableName='dt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['sym'], df['sym'])
assert_array_equal(re['val'], df['val'])
def test_create_dfs_database_value_partition_np_date(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
dates=np.array(pd.date_range(start='20120101', end='20120110'), dtype="datetime64[D]")
db = self.s.database('db', partitionType=keys.VALUE, partitions=dates,
dbPath=DBInfo.dfsDBName)
self.assertEqual(existsDB(DBInfo.dfsDBName), True)
dct = {'databaseDir': DBInfo.dfsDBName,
'partitionType': 1,
'partitionSchema': np.array(pd.date_range(start='20120101', end='20120110'), dtype="datetime64[D]"),
'partitionSites': None
}
re = self.s.run("schema(db)")
self.assertEqual(re['databaseDir'], dct['databaseDir'])
self.assertEqual(re['partitionType'], dct['partitionType'])
assert_array_equal(np.sort(re['partitionSchema']), dct['partitionSchema'])
df = pd.DataFrame({'datetime':np.array(['2012-01-01T00:00:00', '2012-01-02T00:00:00'], dtype='datetime64'), 'sym':['AA', 'BB'], 'val':[1,2]})
t = self.s.table(data=df)
db.createPartitionedTable(table=t, tableName='pt', partitionColumns='datetime').append(t)
re = self.s.run("schema(loadTable('{dbPath}', 'pt')).colDefs".format(dbPath=DBInfo.dfsDBName))
assert_array_equal(re['name'], ['datetime', 'sym', 'val'])
assert_array_equal(re['typeString'], ['NANOTIMESTAMP', 'STRING', 'LONG'])
re = self.s.loadTable(tableName='pt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['datetime'], df['datetime'])
assert_array_equal(re['sym'], df['sym'])
assert_array_equal(re['val'], df['val'])
db.createTable(table=t, tableName='dt').append(t)
re = self.s.loadTable(tableName='dt', dbPath=DBInfo.dfsDBName).toDF()
assert_array_equal(re['datetime'], df['datetime'])
assert_array_equal(re['sym'], df['sym'])
assert_array_equal(re['val'], df['val'])
def test_create_dfs_database_value_partition_np_month(self):
if existsDB(DBInfo.dfsDBName):
dropDB(DBInfo.dfsDBName)
        months = np.array(pd.date_range(start='2012-01', end='2012-10', freq="M"))
# -*- coding: utf-8 -*-
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
bd_train = pd.read_csv('/home/mathxs/Documentos/Projetos do Git/DesafioPy/testfiles/train.csv')
#bd_train.columns
#bd_train = bd_train.dropna(axis=1, how='all');
#bd_train = bd_train.dropna(axis=0, subset=['NU_NOTA_MT', 'CO_UF_RESIDENCIA', 'NU_IDADE', 'TP_COR_RACA', 'TP_NACIONALIDADE', 'TP_ST_CONCLUSAO', 'TP_ANO_CONCLUIU', 'TP_ESCOLA', 'TP_ENSINO', 'IN_TREINEIRO', 'TP_DEPENDENCIA_ADM_ESC', 'IN_BAIXA_VISAO', 'IN_SURDEZ', 'IN_DISLEXIA', 'IN_DISCALCULIA', 'IN_SABATISTA', 'IN_GESTANTE', 'IN_IDOSO', 'TP_PRESENCA_CN', 'TP_PRESENCA_CH', 'TP_PRESENCA_LC', 'NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'TP_LINGUA', 'TP_STATUS_REDACAO', 'NU_NOTA_COMP1', 'NU_NOTA_COMP2', 'NU_NOTA_COMP3', 'NU_NOTA_COMP4', 'NU_NOTA_COMP5', 'NU_NOTA_REDACAO'])
bd_train = pd.DataFrame(bd_train, columns=['Q047', 'Q001', 'Q002', 'Q006', 'Q024', 'Q025', 'Q026', 'TP_SEXO','NU_NOTA_MT', 'CO_UF_RESIDENCIA', 'NU_IDADE', 'TP_COR_RACA', 'TP_ST_CONCLUSAO', 'TP_ANO_CONCLUIU', 'TP_ESCOLA', 'TP_ENSINO', 'TP_PRESENCA_CH', 'TP_PRESENCA_LC', 'NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'TP_LINGUA', 'TP_STATUS_REDACAO', 'NU_NOTA_COMP1', 'NU_NOTA_COMP2', 'NU_NOTA_COMP3', 'NU_NOTA_COMP4', 'NU_NOTA_COMP5', 'NU_NOTA_REDACAO'])
bd_train = bd_train.replace('M','0')
bd_train = bd_train.replace('F','1')
bd_train = bd_train.replace('A','0')
bd_train = bd_train.replace('B','1')
bd_train = bd_train.replace('C','2')
bd_train = bd_train.replace('D','3')
bd_train = bd_train.replace('E','4')
bd_train = bd_train.replace('F','5')
bd_train = bd_train.replace('G','6')
bd_train = bd_train.replace('H','7')
bd_train = bd_train.replace('I','8')
bd_train = bd_train.replace('J','9')
bd_train = bd_train.replace('K','10')
bd_train = bd_train.replace('L','11')
bd_train = bd_train.replace('M','12')
bd_train = bd_train.replace('N','13')
bd_train = bd_train.replace('O','14')
bd_train = bd_train.replace('P','15')
bd_train = bd_train.replace('Q','16')
'''
bd_train = bd_train.replace('c8328ebc6f3238e06076c481bc1b82b8301e7a3f',100)
bd_train = bd_train.replace('b9b06ce8c319a3df2158ea3d0aef0f7d3eecaed7',101)
bd_train = bd_train.replace('2d22ac1d42e6187f09ee6c578df187a760123ccf',102)
bd_train = bd_train.replace('c8328ebc6f3238e06076c481bc1b82b8301e7a3f',103)
bd_train = bd_train.replace('66b1dad288e13be0992bae01e81f71eca1c6e8a6',104)
bd_train = bd_train.replace('03b1fba5c1ebbc47988cd303b08982cfb2aa9cf2',105)
bd_train = bd_train.replace('c87a85497686b3e7b3765f84a2ca95256f0f66aa',106)
bd_train = bd_train.replace('69ed2ddcb151cfebe3d2ae372055335ac7c8c144',107)
bd_train = bd_train.replace('1bcdece8fb1b952552b319e4e5512bbcf540e338',108)
bd_train = bd_train.replace('a27a1efea095c8a973496f0b57a24ac6775d95b0',109)
bd_train = bd_train.replace('9cd70f1b922e02bd33453b3f607f5a644fb9b1b8',110)
bd_train = bd_train.replace('909237ab0d84688e10c0470e2997348aff585273',111)
bd_train = bd_train.replace('f48d390ab6a2428e659c37fb8a9d00afde621889',112)
bd_train = bd_train.replace('942ab3dc020af4cf53740b6b07e9dd7060b24164',113)
bd_train = bd_train.replace('f94e97c2a5689edd5369740fde9a927e23a9465f',114)
bd_train = bd_train.replace('0fb4772fc6ee9b951ade2fbe6699cc37985c422e',115)
bd_train = bd_train.replace('c95541bf218d7ff70572ca4bcb421edeff05c6d5',116)
bd_train = bd_train.replace('6c3fec2ef505409a9e7c3d2e8634fa2aced4ee93',117)
bd_train = bd_train.replace('d5f6d17523d2cce3e4dc0a7f0582a85cec1c15ee',118)
bd_train = bd_train.replace('01af53cd161a420fff1767129c10de560cc264dd',119)
bd_train = bd_train.replace('01abbb7f1a90505385f44eec9905f82ca2a42cfd',120)
bd_train = bd_train.replace('5aebe5cad7fabc1545ac7fba07a4e6177f98483c',121)
bd_train = bd_train.replace('72f80e4b3150c627c7ffc93cfe0fa13a9989b610',122)
bd_train = bd_train.replace('9cbf6bf31d9d89a64ce2737ece4834fde4a95029',123)
bd_train = bd_train.replace('fa86b01f07636b15adfd66b688c79934730721a6',124)
bd_train = bd_train.replace('44b09b311799bd684b3d02463bfa99e472c6adb3',125)
bd_train = bd_train.replace('481058938110a64a272266e3892102b8ef0ca96f',126)
bd_train = bd_train.replace('97caab1e1533dba217deb7ef41490f52e459ab01',127)
bd_train = bd_train.replace('81d0ee00ef42a7c23eb04496458c03d4c5b9c31a',128)
bd_train = bd_train.replace('767a32545304ed293242d528f54d4edb1369f910',129)
bd_train = bd_train.replace('577f8968d95046f5eb5cc158608e12fa9ba34c85',130)
bd_train = bd_train.replace('0ec1c8ac02d2747b6e9a99933fbf96127dd6e89e',131)
bd_train = bd_train.replace('0e0082361eaceb6418bb17305a2b7912650b4783',132)
bd_train = bd_train.replace('6d6961694e839531aec2d35bbd8552b55394a0d7',133)
bd_train = bd_train.replace('73c5c86eef8f70263e4c5708d153cca123f93378',134)
bd_train = bd_train.replace('16f84b7b3d2aeaff7d2f01297e6b3d0e25c77bb2',135)
'''
bd_train = bd_train.fillna('-10')
bd_train1 = bd_train[bd_train['TP_PRESENCA_CH'] != 2]
bd_train2 = bd_train1[bd_train1['TP_PRESENCA_LC'] == 1]
#sns.distplot(bd_train['NU_NOTA_MT'], bd_train['NU_NOTA_CH'], bd_train['NU_NOTA_LC'], bd_train['NU_NOTA_CN']);
#sns.distplot(bd_train['NU_NOTA_MT']);
columns=['Q047','CO_PROVA_CH', 'CO_PROVA_MT', 'Q001', 'Q002', 'Q006', 'Q024', 'Q025', 'Q026', 'TP_SEXO','NU_NOTA_MT', 'CO_UF_RESIDENCIA', 'NU_IDADE', 'TP_COR_RACA', 'TP_ST_CONCLUSAO', 'TP_ANO_CONCLUIU', 'TP_ESCOLA', 'TP_ENSINO', 'TP_PRESENCA_CH', 'TP_PRESENCA_LC', 'NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'TP_LINGUA', 'TP_STATUS_REDACAO', 'NU_NOTA_COMP1', 'NU_NOTA_COMP2', 'NU_NOTA_COMP3', 'NU_NOTA_COMP4', 'NU_NOTA_COMP5', 'NU_NOTA_REDACAO']
'''
for col in columns:
#plt.hist(bd_train[col], normed=True, alpha=0.5)
sns.kdeplot(bd_train[col], shade=True)
'''
# Linear regression
# Preparing the data
#X = bd_train[['NU_NOTA_CH']]
X = bd_train2[['Q047', 'Q001', 'Q002', 'Q006', 'Q024', 'Q025', 'Q026', 'TP_SEXO','CO_UF_RESIDENCIA', 'NU_IDADE', 'TP_COR_RACA', 'TP_ST_CONCLUSAO', 'TP_ANO_CONCLUIU', 'TP_ESCOLA', 'TP_ENSINO', 'TP_PRESENCA_CH', 'TP_PRESENCA_LC', 'NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'TP_LINGUA', 'TP_STATUS_REDACAO', 'NU_NOTA_COMP1', 'NU_NOTA_COMP2', 'NU_NOTA_COMP3', 'NU_NOTA_COMP4', 'NU_NOTA_COMP5', 'NU_NOTA_REDACAO']]
y = bd_train2[['NU_NOTA_MT']]
# Splitting the dataset into training and validation sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 10)
# Training the linear regression model
#from sklearn.linear_model import LinearRegression
#regressor = LinearRegression()
# import the class
#from sklearn.linear_model import LogisticRegression
# instantiate the model (using the default parameters)
#regressor = LogisticRegression()
# Fitting the training data to our model
#regressor.fit(X, np.array(y).astype(int).ravel())
# Predicting the target for our validation set
#y_pred = regressor.predict(X_test)
# Scoring the model
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators=10000, random_state=0)
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
from sklearn.metrics import r2_score, mean_squared_error
# An R2 value close to 1 indicates a good model
print(f"R2 score: {r2_score(y_test, y_pred)}")
# An MSE score close to 0 indicates a good model
print(f"MSE score: {mean_squared_error(y_test, y_pred)}")
bd_erro = pd.DataFrame.from_records(y_test)
arr = np.array(y_pred)
df = pd.DataFrame(data=arr.flatten())
bd_erro = bd_erro.join(df)
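# Hedged follow-up (comments only; an assumption that RMSE is also of interest):
# the RMSE is just the square root of the MSE printed above and is back on the
# same 0-1000 scale as NU_NOTA_MT.
#   rmse = np.sqrt(mean_squared_error(y_test, y_pred))
#   print(f"RMSE score: {rmse}")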
'''
#plotting the chart
linha = np.linspace(-3, 3, 500).reshape(-1, 1) # to generate the prediction line
plt.plot(X_train, y_train, "^", markersize = 5)
plt.plot(X_test, y_test, "v", markersize = 7)
plt.plot(linha, lr.predict(linha))
plt.grid(True)
'''
bd_result = pd.read_csv('/home/mathxs/Documentos/Projetos do Git/DesafioPy/testfiles/test.csv')
#bd_train.columns
#bd_result = bd_result.dropna(axis=1, how='all');
#bd_result = bd_train.dropna(axis=0, subset=['NU_NOTA_MT'])
#bd_result = bd_result.dropna(axis=0, subset=['NU_INSCRICAO', 'CO_UF_RESIDENCIA', 'NU_IDADE', 'TP_COR_RACA', 'TP_NACIONALIDADE', 'TP_ST_CONCLUSAO', 'TP_ANO_CONCLUIU', 'TP_ESCOLA', 'TP_ENSINO', 'IN_TREINEIRO', 'TP_DEPENDENCIA_ADM_ESC', 'IN_BAIXA_VISAO', 'IN_SURDEZ', 'IN_DISLEXIA', 'IN_DISCALCULIA', 'IN_SABATISTA', 'IN_GESTANTE', 'IN_IDOSO', 'TP_PRESENCA_CN', 'TP_PRESENCA_CH', 'TP_PRESENCA_LC', 'NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'TP_LINGUA', 'TP_STATUS_REDACAO', 'NU_NOTA_COMP1', 'NU_NOTA_COMP2', 'NU_NOTA_COMP3', 'NU_NOTA_COMP4', 'NU_NOTA_COMP5', 'NU_NOTA_REDACAO'])
bd_result = pd.DataFrame(bd_result, columns=['Q047', 'Q001', 'Q002', 'Q006', 'Q024', 'Q025', 'Q026', 'TP_SEXO', 'NU_INSCRICAO', 'CO_UF_RESIDENCIA', 'NU_IDADE', 'TP_COR_RACA', 'TP_ST_CONCLUSAO', 'TP_ANO_CONCLUIU', 'TP_ESCOLA', 'TP_ENSINO', 'TP_PRESENCA_CH', 'TP_PRESENCA_LC', 'NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'TP_LINGUA', 'TP_STATUS_REDACAO', 'NU_NOTA_COMP1', 'NU_NOTA_COMP2', 'NU_NOTA_COMP3', 'NU_NOTA_COMP4', 'NU_NOTA_COMP5', 'NU_NOTA_REDACAO'])
# -*- coding: utf-8 -*-
import random
import logging
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from tests.test_base import BaseTest
from mabwiser.mab import MAB, LearningPolicy, NeighborhoodPolicy
from mabwiser.simulator import Simulator
logging.disable(logging.CRITICAL)
class ExampleTest(BaseTest):
def test_popularity(self):
list_of_arms = ['Arm1', 'Arm2']
decisions = ['Arm1', 'Arm1', 'Arm2', 'Arm1']
rewards = [20, 17, 25, 9]
mab = MAB(list_of_arms, LearningPolicy.Popularity())
mab.fit(decisions, rewards)
mab.predict()
self.assertEqual("Arm2", mab.predict())
self.assertDictEqual({'Arm1': 0.38016528925619836, 'Arm2': 0.6198347107438016},
mab.predict_expectations())
def test_random(self):
arm, mab = self.predict(arms=[1, 2],
decisions=[1, 1, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 1, 2, 1],
rewards=[10, 17, 22, 9, 4, 0, 7, 8, 20, 9, 50, 5, 7, 12, 10],
learning_policy=LearningPolicy.Random(),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arm, 1)
layout_partial = [1, 2, 1, 2]
revenue_partial = [0, 12, 7, 19]
mab.partial_fit(decisions=layout_partial, rewards=revenue_partial)
mab.add_arm(3)
self.assertTrue(3 in mab.arms)
self.assertTrue(3 in mab._imp.arm_to_expectation.keys())
def test_greedy15(self):
arm, mab = self.predict(arms=[1, 2],
decisions=[1, 1, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 1, 2, 1],
rewards=[10, 17, 22, 9, 4, 0, 7, 8, 20, 9, 50, 5, 7, 12, 10],
learning_policy=LearningPolicy.EpsilonGreedy(epsilon=0.15),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arm, 2)
layout_partial = [1, 2, 1, 2]
revenue_partial = [0, 12, 7, 19]
mab.partial_fit(decisions=layout_partial, rewards=revenue_partial)
mab.add_arm(3)
self.assertTrue(3 in mab.arms)
self.assertTrue(3 in mab._imp.arm_to_expectation.keys())
def test_linucb(self):
train_df = pd.DataFrame({'ad': [1, 1, 1, 2, 4, 5, 3, 3, 2, 1, 4, 5, 3, 2, 5],
'revenues': [10, 17, 22, 9, 4, 20, 7, 8, 20, 9, 50, 5, 7, 12, 10],
'age': [22, 27, 39, 48, 21, 20, 19, 37, 52, 26, 18, 42, 55, 57, 38],
'click_rate': [0.2, 0.6, 0.99, 0.68, 0.15, 0.23, 0.75, 0.17,
0.33, 0.65, 0.56, 0.22, 0.19, 0.11, 0.83],
'subscriber': [1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0]}
)
# Test data to for new prediction
test_df = pd.DataFrame({'age': [37, 52], 'click_rate': [0.5, 0.6], 'subscriber': [0, 1]})
test_df_revenue = pd.Series([7, 13])
# Scale the data
scaler = StandardScaler()
train = scaler.fit_transform(np.asarray(train_df[['age', 'click_rate', 'subscriber']], dtype='float64'))
test = scaler.transform(np.asarray(test_df, dtype='float64'))
arms, mab = self.predict(arms=[1, 2, 3, 4, 5],
decisions=train_df['ad'],
rewards=train_df['revenues'],
learning_policy=LearningPolicy.LinUCB(alpha=1.25),
context_history=train,
contexts=test,
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arms, [5, 2])
mab.partial_fit(decisions=arms, rewards=test_df_revenue, contexts=test)
mab.add_arm(6)
self.assertTrue(6 in mab.arms)
self.assertTrue(6 in mab._imp.arm_to_expectation.keys())
def test_softmax(self):
arm, mab = self.predict(arms=[1, 2],
decisions=[1, 1, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 1, 2, 1],
rewards=[10, 17, 22, 9, 4, 0, 7, 8, 20, 9, 50, 5, 7, 12, 10],
learning_policy=LearningPolicy.Softmax(tau=1),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arm, 2)
layout_partial = [1, 2, 1, 2]
revenue_partial = [0, 12, 7, 19]
mab.partial_fit(decisions=layout_partial, rewards=revenue_partial)
mab.add_arm(3)
self.assertTrue(3 in mab.arms)
self.assertTrue(3 in mab._imp.arm_to_expectation.keys())
def test_lints(self):
train_df = pd.DataFrame({'ad': [1, 1, 1, 2, 4, 5, 3, 3, 2, 1, 4, 5, 3, 2, 5],
'revenues': [10, 17, 22, 9, 4, 20, 7, 8, 20, 9, 50, 5, 7, 12, 10],
'age': [22, 27, 39, 48, 21, 20, 19, 37, 52, 26, 18, 42, 55, 57, 38],
'click_rate': [0.2, 0.6, 0.99, 0.68, 0.15, 0.23, 0.75, 0.17,
0.33, 0.65, 0.56, 0.22, 0.19, 0.11, 0.83],
'subscriber': [1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0]}
)
        # Test data for new prediction
test_df = pd.DataFrame({'age': [37, 52], 'click_rate': [0.5, 0.6], 'subscriber': [0, 1]})
test_df_revenue = pd.Series([7, 13])
# Scale the data
scaler = StandardScaler()
train = scaler.fit_transform(np.asarray(train_df[['age', 'click_rate', 'subscriber']], dtype='float64'))
test = scaler.transform(np.asarray(test_df, dtype='float64'))
arms, mab = self.predict(arms=[1, 2, 3, 4, 5],
decisions=train_df['ad'],
rewards=train_df['revenues'],
learning_policy=LearningPolicy.LinTS(alpha=1.5),
context_history=train,
contexts=test,
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arms, [5, 2])
mab.partial_fit(decisions=arms, rewards=test_df_revenue, contexts=test)
mab.add_arm(6)
self.assertTrue(6 in mab.arms)
self.assertTrue(6 in mab._imp.arm_to_expectation.keys())
def test_ts(self):
dec_to_threshold = {1: 10, 2: 20}
def binarize(dec, value):
return value >= dec_to_threshold[dec]
arm, mab = self.predict(arms=[1, 2],
decisions=[1, 1, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 1, 2, 1],
rewards=[10, 17, 22, 9, 4, 0, 7, 8, 20, 9, 50, 5, 7, 12, 10],
learning_policy=LearningPolicy.ThompsonSampling(binarizer=binarize),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arm, 1)
layout_partial = [1, 2, 1, 2]
revenue_partial = [0, 12, 7, 19]
mab.partial_fit(decisions=layout_partial, rewards=revenue_partial)
# Updating of the model with new arm
def binary_func2(decision, reward):
if decision == 3:
return 1 if reward > 15 else 0
else:
return 1 if reward > 10 else 0
mab.add_arm(3, binary_func2)
self.assertTrue(3 in mab.arms)
self.assertTrue(3 in mab._imp.arm_to_expectation.keys())
def test_ts_binary(self):
arm, mab = self.predict(arms=[1, 2],
decisions=[1, 1, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 1, 2, 1],
rewards=[1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1],
learning_policy=LearningPolicy.ThompsonSampling(),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arm, 1)
layout_partial = [1, 2, 1, 2]
revenue_partial = [0, 1, 0, 1]
mab.partial_fit(decisions=layout_partial, rewards=revenue_partial)
mab.add_arm(3)
self.assertTrue(3 in mab.arms)
self.assertTrue(3 in mab._imp.arm_to_expectation.keys())
def test_ucb1(self):
arm, mab = self.predict(arms=[1, 2],
decisions=[1, 1, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 1, 2, 1],
rewards=[10, 17, 22, 9, 4, 0, 7, 8, 20, 9, 50, 5, 7, 12, 10],
learning_policy=LearningPolicy.UCB1(alpha=1.25),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arm, 2)
layout_partial = [1, 2, 1, 2]
revenue_partial = [0, 12, 7, 19]
mab.partial_fit(decisions=layout_partial, rewards=revenue_partial)
mab.add_arm(3)
self.assertTrue(3 in mab.arms)
self.assertTrue(3 in mab._imp.arm_to_expectation.keys())
def test_ts_series(self):
df = pd.DataFrame({'layouts': [1, 1, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 1, 2, 1],
'revenues': [10, 17, 22, 9, 4, 0, 7, 8, 20, 9, 50, 5, 7, 12, 10]})
arm, mab = self.predict(arms=[1, 2],
decisions=df['layouts'],
rewards=df['revenues'],
learning_policy=LearningPolicy.EpsilonGreedy(epsilon=0.15),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arm, 2)
def test_ts_numpy(self):
arm, mab = self.predict(arms=[1, 2],
decisions=np.array([1, 1, 1, 2, 1, 2, 2, 1, 2, 1, 2, 2, 1, 2, 1]),
rewards=np.array([10, 17, 22, 9, 4, 0, 7, 8, 20, 9, 50, 5, 7, 12, 10]),
learning_policy=LearningPolicy.EpsilonGreedy(epsilon=0.15),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arm, 2)
def test_approximate(self):
train_df = pd.DataFrame({'ad': [1, 1, 1, 2, 4, 5, 3, 3, 2, 1, 4, 5, 3, 2, 5],
'revenues': [10, 17, 22, 9, 4, 20, 7, 8, 20, 9, 50, 5, 7, 12, 10],
'age': [22, 27, 39, 48, 21, 20, 19, 37, 52, 26, 18, 42, 55, 57, 38],
'click_rate': [0.2, 0.6, 0.99, 0.68, 0.15, 0.23, 0.75, 0.17,
0.33, 0.65, 0.56, 0.22, 0.19, 0.11, 0.83],
'subscriber': [1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0]}
)
        # Test data for new prediction
test_df = pd.DataFrame({'age': [37, 52], 'click_rate': [0.5, 0.6], 'subscriber': [0, 1]})
test_df_revenue = pd.Series([7, 13])
# Scale the data
scaler = StandardScaler()
train = scaler.fit_transform(np.asarray(train_df[['age', 'click_rate', 'subscriber']], dtype='float64'))
test = scaler.transform(np.asarray(test_df, dtype='float64'))
arms, mab = self.predict(arms=[1, 2, 3, 4, 5],
decisions=train_df['ad'],
rewards=train_df['revenues'],
learning_policy=LearningPolicy.UCB1(alpha=1.25),
neighborhood_policy=NeighborhoodPolicy.LSHNearest(n_tables=5, n_dimensions=5),
context_history=train,
contexts=test,
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arms, [1, 4])
mab.partial_fit(decisions=arms, rewards=test_df_revenue, contexts=test)
mab.add_arm(6)
self.assertTrue(6 in mab.arms)
self.assertTrue(6 in mab._imp.arm_to_expectation.keys())
def test_radius(self):
train_df = pd.DataFrame({'ad': [1, 1, 1, 2, 4, 5, 3, 3, 2, 1, 4, 5, 3, 2, 5],
'revenues': [10, 17, 22, 9, 4, 20, 7, 8, 20, 9, 50, 5, 7, 12, 10],
'age': [22, 27, 39, 48, 21, 20, 19, 37, 52, 26, 18, 42, 55, 57, 38],
'click_rate': [0.2, 0.6, 0.99, 0.68, 0.15, 0.23, 0.75, 0.17,
0.33, 0.65, 0.56, 0.22, 0.19, 0.11, 0.83],
'subscriber': [1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0]}
)
        # Test data for new prediction
test_df = pd.DataFrame({'age': [37, 52], 'click_rate': [0.5, 0.6], 'subscriber': [0, 1]})
test_df_revenue = pd.Series([7, 13])
# Scale the data
scaler = StandardScaler()
train = scaler.fit_transform(np.asarray(train_df[['age', 'click_rate', 'subscriber']], dtype='float64'))
test = scaler.transform(np.asarray(test_df, dtype='float64'))
arms, mab = self.predict(arms=[1, 2, 3, 4, 5],
decisions=train_df['ad'],
rewards=train_df['revenues'],
learning_policy=LearningPolicy.UCB1(alpha=1.25),
neighborhood_policy=NeighborhoodPolicy.Radius(radius=5),
context_history=train,
contexts=test,
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arms, [4, 4])
mab.partial_fit(decisions=arms, rewards=test_df_revenue, contexts=test)
mab.add_arm(6)
self.assertTrue(6 in mab.arms)
self.assertTrue(6 in mab._imp.arm_to_expectation.keys())
def test_nearest(self):
train_df = pd.DataFrame({'ad': [1, 1, 1, 2, 4, 5, 3, 3, 2, 1, 4, 5, 3, 2, 5],
'revenues': [10, 17, 22, 9, 4, 20, 7, 8, 20, 9, 50, 5, 7, 12, 10],
'age': [22, 27, 39, 48, 21, 20, 19, 37, 52, 26, 18, 42, 55, 57, 38],
'click_rate': [0.2, 0.6, 0.99, 0.68, 0.15, 0.23, 0.75, 0.17,
0.33, 0.65, 0.56, 0.22, 0.19, 0.11, 0.83],
'subscriber': [1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0]}
)
        # Test data for new prediction
test_df = pd.DataFrame({'age': [37, 52], 'click_rate': [0.5, 0.6], 'subscriber': [0, 1]})
test_df_revenue = pd.Series([7, 13])
# Scale the data
scaler = StandardScaler()
train = scaler.fit_transform(np.asarray(train_df[['age', 'click_rate', 'subscriber']], dtype='float64'))
test = scaler.transform(np.asarray(test_df, dtype='float64'))
arms, mab = self.predict(arms=[1, 2, 3, 4, 5],
decisions=train_df['ad'],
rewards=train_df['revenues'],
learning_policy=LearningPolicy.UCB1(alpha=1.25),
neighborhood_policy=NeighborhoodPolicy.KNearest(k=5),
context_history=train,
contexts=test,
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arms, [5, 1])
mab.partial_fit(decisions=arms, rewards=test_df_revenue, contexts=test)
mab.add_arm(6)
self.assertTrue(6 in mab.arms)
self.assertTrue(6 in mab._imp.arm_to_expectation.keys())
def test_linucb_radius(self):
train_df = pd.DataFrame({'ad': [1, 1, 1, 2, 4, 5, 3, 3, 2, 1, 4, 5, 3, 2, 5],
'revenues': [10, 17, 22, 9, 4, 20, 7, 8, 20, 9, 50, 5, 7, 12, 10],
'age': [22, 27, 39, 48, 21, 20, 19, 37, 52, 26, 18, 42, 55, 57, 38],
'click_rate': [0.2, 0.6, 0.99, 0.68, 0.15, 0.23, 0.75, 0.17,
0.33, 0.65, 0.56, 0.22, 0.19, 0.11, 0.83],
'subscriber': [1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0]}
)
        # Test data for new prediction
test_df = pd.DataFrame({'age': [37, 52], 'click_rate': [0.5, 0.6], 'subscriber': [0, 1]})
# Scale the data
scaler = StandardScaler()
train = scaler.fit_transform(np.asarray(train_df[['age', 'click_rate', 'subscriber']], dtype='float64'))
test = scaler.transform(np.asarray(test_df, dtype='float64'))
arms, mab = self.predict(arms=[1, 2, 3, 4, 5],
decisions=train_df['ad'],
rewards=train_df['revenues'],
learning_policy=LearningPolicy.LinUCB(alpha=1.25),
neighborhood_policy=NeighborhoodPolicy.Radius(radius=1),
context_history=train,
contexts=test,
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arms, [1, 2])
def test_linucb_knearest(self):
train_df = pd.DataFrame({'ad': [1, 1, 1, 2, 4, 5, 3, 3, 2, 1, 4, 5, 3, 2, 5],
'revenues': [10, 17, 22, 9, 4, 20, 7, 8, 20, 9, 50, 5, 7, 12, 10],
'age': [22, 27, 39, 48, 21, 20, 19, 37, 52, 26, 18, 42, 55, 57, 38],
'click_rate': [0.2, 0.6, 0.99, 0.68, 0.15, 0.23, 0.75, 0.17,
0.33, 0.65, 0.56, 0.22, 0.19, 0.11, 0.83],
'subscriber': [1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0]}
)
        # Test data for new prediction
test_df = pd.DataFrame({'age': [37, 52], 'click_rate': [0.5, 0.6], 'subscriber': [0, 1]})
# Scale the data
scaler = StandardScaler()
train = scaler.fit_transform(np.asarray(train_df[['age', 'click_rate', 'subscriber']], dtype='float64'))
test = scaler.transform(np.asarray(test_df, dtype='float64'))
arms, mab = self.predict(arms=[1, 2, 3, 4, 5],
decisions=train_df['ad'],
rewards=train_df['revenues'],
learning_policy=LearningPolicy.LinUCB(alpha=1.25),
neighborhood_policy=NeighborhoodPolicy.KNearest(k=4),
context_history=train,
contexts=test,
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(arms, [1, 2])
def test_lints_radius(self):
train_df = pd.DataFrame({'ad': [1, 1, 1, 2, 4, 5, 3, 3, 2, 1, 4, 5, 3, 2, 5],
'revenues': [10, 17, 22, 9, 4, 20, 7, 8, 20, 9, 50, 5, 7, 12, 10],
'age': [22, 27, 39, 48, 21, 20, 19, 37, 52, 26, 18, 42, 55, 57, 38],
'click_rate': [0.2, 0.6, 0.99, 0.68, 0.15, 0.23, 0.75, 0.17,
0.33, 0.65, 0.56, 0.22, 0.19, 0.11, 0.83],
'subscriber': [1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0]}
)
        # Test data for new prediction
test_df = | pd.DataFrame({'age': [37, 52], 'click_rate': [0.5, 0.6], 'subscriber': [0, 1]}) | pandas.DataFrame |
# coding: utf-8
# author: wamhanwan
"""Tushare API"""
import tushare as ts
import pandas as pd
import numpy as np
from time import sleep
from FactorLib.utils.tool_funcs import get_members_of_date
from functools import update_wrapper
_token = '6135b90bf40bb5446ef2fe7aa20a9467ad10023eda97234739743f46'
SHEXG = 'SSE'  # Shanghai Stock Exchange code
SZEXG = 'SZSE'  # Shenzhen Stock Exchange code
ts.set_token(_token)
pro_api = ts.pro_api()
def set_call_limit(max_times=60, sleep_seconds=60):
if isinstance(max_times, int):
if max_times < 0:
max_times = 1
else:
raise TypeError("Expected max_times to be an integer")
def decorating_function(user_function):
wrapper = _time_limit_wrapper(user_function, max_times, sleep_seconds)
return update_wrapper(wrapper, user_function)
return decorating_function
def _time_limit_wrapper(user_function, max_times, sleep_seconds):
times = 0
def wrapper(*args, **kwargs):
nonlocal times
if times == max_times:
print(f"sleep {sleep_seconds} sceonds")
sleep(sleep_seconds)
times = 0
times += 1
return user_function(*args, **kwargs)
return wrapper
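# Illustrative sketch of the rate limiter above: once `max_times` decorated
# calls have gone through, the next call sleeps for `sleep_seconds` and resets
# the counter before executing. `_example_daily_bars` is a hypothetical helper
# around the Tushare pro `daily` endpoint, shown only for usage illustration.
@set_call_limit(max_times=60, sleep_seconds=60)
def _example_daily_bars(ts_code, trade_date):
    # Forward to the shared pro_api client created at import time.
    return pro_api.daily(ts_code=ts_code, trade_date=trade_date)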
class TushareDB(object):
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = object.__new__(cls)
return cls._instance
return cls._instance
def __init__(self):
self._api = ts
self._pro_api = pro_api
self._token = _token
@classmethod
def get_instance(cls):
return TushareDB()
def run_api(self, api_name, *args, **kwargs):
return getattr(self._pro_api, api_name)(*args, **kwargs)
def format_date(self, data, column=None):
data[column] = pd.to_datetime(data[column], format='%Y%m%d')
return data
def stock_basic_get(self, list_status=None, exchange=None, is_hs=None,
fields=None):
"""基础数据-股票列表(只能取最新数据)
Parameters:
----------
list_status: str
上市状态 L上市 D退市 P暂停上市
exchange: str
交易所 SHEXG上交所 SZEXG深交所
is_hs: str
是否深港通标的 N否 H沪股通 S深股通
Fields:
------
symbol 股票代码(ticker)
name 股票简称
industry 行业
list_status 上市状态
list_date 上市日期
delist_date 退市日期
is_hs 是否深港通标的
"""
data1 = self.run_api('stock_basic', list_status='L', exchange=exchange, is_hs=is_hs,
fields=fields)
data2 = self.run_api('stock_basic', list_status='P', exchange=exchange, is_hs=is_hs,
fields=fields)
data3 = self.run_api('stock_basic', list_status='D', exchange=exchange, is_hs=is_hs,
fields=fields)
l = [data1,data2,data3]
if list_status:
            status_index = [['L', 'P', 'D'].index(x) for x in list_status.split(',')]
return pd.concat([l[i] for i in status_index]).sort_values('symbol')
return pd.concat(l).sort_values('symbol')
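        # Illustrative usage (variable names are hypothetical):
        #   db = TushareDB.get_instance()
        #   listed = db.stock_basic_get(list_status='L',
        #                               fields='symbol,name,list_date,delist_date')
        # All three status tables are downloaded first; the comma-separated
        # `list_status` filter is applied afterwards.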
def stock_st_get(self, date):
"""A股戴帽摘帽
Paramters:
---------
date: str
日期 YYYYMMDD
Returns:
------
DataFrame: IDs name start_date end_date
"""
data = self.run_api('namechange', end_date=date)
data = data[data['name'].str.contains('ST')]
data = data.fillna({'end_date': '21001231'})
data = data[(data['start_date']<=date)&(data['end_date']>=date)]
data = data[~data['ts_code'].duplicated(keep='last')]
data['IDs'] = data['ts_code'].str[:6]
return data
def stock_onlist_get(self, date):
"""A股特定日期的股票列表
Return
------
DataFrame: symbol name list_date delist_date
"""
all_stocks = self.stock_basic_get(
fields='symbol,name,list_date,delist_date'
).fillna({'delist_date':'21001231'})
indices = (all_stocks['list_date']<=date)&(all_stocks['delist_date']>date)
return all_stocks[indices]
def index_weight_get(self, index_code, date):
"""A股指数成分股权重
Parameters:
-----------
index_code: str
指数代码 399300.SZ沪深300 000905.SH中证500 000906.SH中证800
date: str
日期 YYYYMMDD
Returns:
--------
DataFrame index_code con_code trade_date weight
"""
start_date = (pd.to_datetime(date)- | pd.Timedelta(days=30) | pandas.Timedelta |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 19 20:34:46 2021
@author: tapan
"""
from pydoc import doc
import sys
import nltk, re
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from pdf_text_extractor import convert_pdf_to_text # type: ignore
class Parse():
inputDF = pd.DataFrame()
def __init__(self,verbose=False):
print(" Starting Program ")
self.tokenizedDF = self.tokenize(self.readResumeFiles())
for index, text, tokens in self.tokenizedDF.itertuples():
print("Started processing document %s" %index)
#handle name extraction --
name = self.extract_name(text)
#handle email extraction--
email = self.extract_email(text)
#handle phone number extraction--
phone = self.extract_phone(text)
#handle experience extraction --
experience = self.extract_experience(text)
#handle skills extraction--
skills = self.extract_skills(text)
#handle qualification extraction--
qualification = self.extract_qualification(text)
extractedInfo = self.getInfo("fileName " + str(index), name, email, phone, experience, skills, qualification) # TODO -> move this to util
print(extractedInfo) #TODO -> remove this print
#TODO -> Dump all jsonRespnses to csv or excel sheet
def readResumeFiles(self):
try:
return convert_pdf_to_text()
except:
return ''
pass
def preprocess(self, document):
df = | pd.DataFrame() | pandas.DataFrame |
"""
Functions for writing to .csv
September 2020
Written by <NAME>
"""
import os
import pandas as pd
import datetime
def define_deciles(regions):
"""
Allocate deciles to regions.
"""
regions = regions.sort_values(by='population_km2', ascending=True)
regions['decile'] = regions.groupby([
'GID_0',
'scenario',
'strategy',
'confidence'
], as_index=True).population_km2.apply( #cost_per_sp_user
pd.qcut, q=11, precision=0,
labels=[100,90,80,70,60,50,40,30,20,10,0],
duplicates='drop') # [0,10,20,30,40,50,60,70,80,90,100]
return regions
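# Behavioural note: pd.qcut assigns the label list in ascending order of
# population_km2, so within each (GID_0, scenario, strategy, confidence) group
# the label 100 marks the least dense regions and 0 the densest (the first
# label goes to the lowest-value bin).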
def write_mno_demand(regional_annual_demand, folder, metric, path):
"""
Write all annual demand results for a single hypothetical Mobile
Network Operator (MNO).
"""
print('Writing annual_demand')
regional_annual_demand = pd.DataFrame(regional_annual_demand)
regional_annual_demand = regional_annual_demand[[
'GID_0', 'GID_id', 'scenario', 'strategy',
'confidence', 'year', 'population', 'area_km2', 'population_km2',
'geotype', 'arpu_discounted_monthly', 'penetration', 'population_with_phones',
'phones_on_network', 'smartphone_penetration', 'population_with_smartphones',
'smartphones_on_network', 'revenue'
]]
regional_annual_demand.to_csv(path, index=False)
def write_results(regional_results, folder, metric):
"""
Write all results.
"""
print('Writing national MNO results')
national_results = pd.DataFrame(regional_results)
national_results = national_results[[
'GID_0', 'scenario', 'strategy', 'confidence', 'population', 'area_km2',
'phones_on_network', 'smartphones_on_network', 'total_estimated_sites',
'existing_mno_sites', 'upgraded_mno_sites', 'new_mno_sites',
'mno_network_capex', 'mno_network_opex','mno_network_cost',
'total_mno_revenue', 'total_mno_cost',
]]
national_results = national_results.drop_duplicates()
national_results = national_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence'], as_index=True).sum()
national_results['cost_per_network_user'] = (
national_results['total_mno_cost'] / national_results['phones_on_network'])
national_results['cost_per_smartphone_user'] = (
national_results['total_mno_cost'] / national_results['smartphones_on_network'])
path = os.path.join(folder,'national_mno_results_{}.csv'.format(metric))
national_results.to_csv(path, index=True)
print('Writing national cost composition results')
national_cost_results = pd.DataFrame(regional_results)
national_cost_results = national_cost_results[[
'GID_0', 'scenario', 'strategy', 'confidence', 'population',
'phones_on_network', 'smartphones_on_network', 'total_mno_revenue',
'ran_capex', 'ran_opex', 'backhaul_capex', 'backhaul_opex',
'civils_capex', 'core_capex', 'core_opex',
'administration', 'spectrum_cost', 'tax', 'profit_margin',
'mno_network_capex', 'mno_network_opex',
'mno_network_cost',
'available_cross_subsidy', 'deficit',
'used_cross_subsidy', 'required_state_subsidy', 'total_mno_cost'
]]
national_cost_results = national_cost_results.drop_duplicates()
national_cost_results = national_cost_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence'], as_index=True).sum()
national_cost_results['cost_per_network_user'] = (
national_cost_results['total_mno_cost'] /
national_cost_results['phones_on_network'])
national_cost_results['cost_per_smartphone_user'] = (
national_cost_results['total_mno_cost'] /
national_cost_results['smartphones_on_network'])
#Calculate private, govt and financial costs
national_cost_results['private_cost'] = national_cost_results['total_mno_cost']
national_cost_results['government_cost'] = (
national_cost_results['required_state_subsidy'] -
(national_cost_results['spectrum_cost'] + national_cost_results['tax']))
national_cost_results['financial_cost'] = (
national_cost_results['private_cost'] + national_cost_results['government_cost'])
# national_cost_results['required_efficiency_saving'] = (
# national_cost_results['government_cost'] /
# national_cost_results['private_cost'] * 100)
path = os.path.join(folder,'national_mno_cost_results_{}.csv'.format(metric))
national_cost_results.to_csv(path, index=True)
print('Writing general decile results')
decile_results = pd.DataFrame(regional_results)
decile_results = define_deciles(decile_results)
decile_results = decile_results[[
'GID_0', 'scenario', 'strategy', 'decile', 'confidence',
'population', 'area_km2', 'phones_on_network',
'smartphones_on_network', 'total_estimated_sites',
'existing_mno_sites', 'upgraded_mno_sites', 'new_mno_sites',
'total_mno_revenue',
'mno_network_capex', 'mno_network_opex', 'mno_network_cost',
'total_mno_cost',
]]
decile_results = decile_results.drop_duplicates()
decile_results = decile_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence', 'decile'], as_index=True).sum()
decile_results['population_km2'] = (
decile_results['population'] / decile_results['area_km2'])
decile_results['phone_density_on_network_km2'] = (
decile_results['phones_on_network'] / decile_results['area_km2'])
decile_results['sp_density_on_network_km2'] = (
decile_results['smartphones_on_network'] / decile_results['area_km2'])
decile_results['total_estimated_sites_km2'] = (
decile_results['total_estimated_sites'] / decile_results['area_km2'])
decile_results['existing_mno_sites_km2'] = (
decile_results['existing_mno_sites'] / decile_results['area_km2'])
decile_results['cost_per_network_user'] = (
decile_results['total_mno_cost'] / decile_results['phones_on_network'])
decile_results['cost_per_smartphone_user'] = (
decile_results['total_mno_cost'] / decile_results['smartphones_on_network'])
path = os.path.join(folder,'decile_mno_results_{}.csv'.format(metric))
decile_results.to_csv(path, index=True)
print('Writing cost decile results')
decile_cost_results = pd.DataFrame(regional_results)
decile_cost_results = define_deciles(decile_cost_results)
decile_cost_results = decile_cost_results[[
'GID_0', 'scenario', 'strategy', 'decile', 'confidence',
'population', 'area_km2', 'phones_on_network', 'smartphones_on_network',
'total_mno_revenue', 'ran_capex', 'ran_opex', 'backhaul_capex',
'backhaul_opex', 'civils_capex', 'core_capex', 'core_opex',
'administration', 'spectrum_cost', 'tax', 'profit_margin',
'mno_network_capex', 'mno_network_opex', 'mno_network_cost',
'total_mno_cost',
'available_cross_subsidy', 'deficit', 'used_cross_subsidy',
'required_state_subsidy',
]]
decile_cost_results = decile_cost_results.drop_duplicates()
decile_cost_results = decile_cost_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence', 'decile'], as_index=True).sum()
decile_cost_results['cost_per_network_user'] = (
decile_cost_results['total_mno_cost'] / decile_cost_results['phones_on_network'])
decile_cost_results['cost_per_smartphone_user'] = (
decile_cost_results['total_mno_cost'] / decile_cost_results['smartphones_on_network'])
decile_cost_results['private_cost'] = decile_cost_results['total_mno_cost']
decile_cost_results['government_cost'] = (
decile_cost_results['required_state_subsidy'] -
(decile_cost_results['spectrum_cost'] + decile_cost_results['tax']))
decile_cost_results['financial_cost'] = (
decile_cost_results['private_cost'] + decile_cost_results['government_cost'])
path = os.path.join(folder,'decile_mno_cost_results_{}.csv'.format(metric))
decile_cost_results.to_csv(path, index=True)
print('Writing regional results')
regional_mno_results = | pd.DataFrame(regional_results) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This file is part of the Shotgun Lipidomics Assistant (SLA) project.
Copyright 2020 <NAME> (UCLA), <NAME> (UCLA), <NAME> (UW).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
# from pyopenms import *
import os
# import tkinter as tk
# from tkinter import ttk
from tkinter import messagebox
# from tkinter.messagebox import showinfo
from tkinter import *
# import matplotlib.pyplot as plt
# import matplotlib
# matplotlib.use("Agg")
from tkinter import filedialog
# import glob
import re
# import statistics
import datetime
# from matplotlib.pyplot import cm
# import seaborn as sns
def imp_map(maploc):
# map1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"),("all files", "*.*")))
maploc.configure(state="normal")
maploc.delete(1.0, END)
map1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"), ("all files", "*.*")))
maploc.insert(INSERT, map1)
maploc.configure(state="disabled")
def imp_method1(method1loc):
# file1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"),("all files", "*.*")))
method1loc.configure(state="normal")
method1loc.delete(1.0, END)
file1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"), ("all files", "*.*")))
method1loc.insert(INSERT, file1)
method1loc.configure(state="disabled")
def imp_method2(method2loc):
# file2 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"),("all files", "*.*")))
method2loc.configure(state="normal")
method2loc.delete(1.0, END)
file2 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"), ("all files", "*.*")))
method2loc.insert(INSERT, file2)
method2loc.configure(state="disabled")
def set_dir(dirloc_aggregate):
# setdir = filedialog.askdirectory()
dirloc_aggregate.configure(state="normal")
dirloc_aggregate.delete(1.0, END)
setdir = filedialog.askdirectory()
dirloc_aggregate.insert(INSERT, setdir)
dirloc_aggregate.configure(state="disabled")
def MergeApp(dirloc_aggregate, proname, method1loc, method2loc, maploc, CheckClustVis):
start = datetime.datetime.now()
os.chdir(dirloc_aggregate.get('1.0', 'end-1c'))
project = proname.get()
file1 = method1loc.get('1.0', 'end-1c')
file2 = method2loc.get('1.0', 'end-1c')
map1 = maploc.get('1.0', 'end-1c')
# Fix sample index(name) type for proper merge
def qcname(indname):
# if 'QC_SPIKE' in str(indname):
# return('QC_SPIKE')
# elif 'QC' in str(indname):
# return('QC')
# elif 'b' in str(indname) or 'm' in str(indname):
if re.search('[a-zA-Z]', str(indname)):
return (str(indname))
else:
return (int(indname))
# Import DataFrames from file1
spequant1 = pd.read_excel(file1, sheet_name='Lipid Species Concentrations', header=0, index_col=0, na_values='.')
specomp1 = pd.read_excel(file1, sheet_name='Lipid Species Composition', header=0, index_col=0, na_values='.')
claquant1 = pd.read_excel(file1, sheet_name='Lipid Class Concentration', header=0, index_col=0, na_values='.')
faquant1 = pd.read_excel(file1, sheet_name='Fatty Acid Concentration', header=0, index_col=0, na_values='.')
facomp1 = pd.read_excel(file1, sheet_name='Fatty Acid Composition', header=0, index_col=0, na_values='.')
spequant1.index = list(map(qcname, list(spequant1.index)))
specomp1.index = list(map(qcname, list(specomp1.index)))
claquant1.index = list(map(qcname, list(claquant1.index)))
faquant1.index = list(map(qcname, list(faquant1.index)))
facomp1.index = list(map(qcname, list(facomp1.index)))
# Import DataFrames from file2
if file2 != '':
spequant2 = pd.read_excel(file2, sheet_name='Lipid Species Concentrations', header=0, index_col=0,
na_values='.')
specomp2 = pd.read_excel(file2, sheet_name='Lipid Species Composition', header=0, index_col=0, na_values='.')
claquant2 = pd.read_excel(file2, sheet_name='Lipid Class Concentration', header=0, index_col=0, na_values='.')
faquant2 = pd.read_excel(file2, sheet_name='Fatty Acid Concentration', header=0, index_col=0, na_values='.')
facomp2 = pd.read_excel(file2, sheet_name='Fatty Acid Composition', header=0, index_col=0, na_values='.')
spequant2.index = list(map(qcname, list(spequant2.index)))
specomp2.index = list(map(qcname, list(specomp2.index)))
claquant2.index = list(map(qcname, list(claquant2.index)))
faquant2.index = list(map(qcname, list(faquant2.index)))
facomp2.index = list(map(qcname, list(facomp2.index)))
else:
spequant2 = pd.DataFrame()
specomp2 = pd.DataFrame()
claquant2 = pd.DataFrame()
faquant2 = pd.DataFrame()
facomp2 = pd.DataFrame()
# Merge DataFrames
spequant = pd.concat([spequant1, spequant2], axis=1, sort=False)
specomp = pd.concat([specomp1, specomp2], axis=1, sort=False)
claquant = pd.concat([claquant1, claquant2], axis=1, sort=False)
faquant = pd.concat([faquant1, faquant2], axis=1, sort=False)
facomp = pd.concat([facomp1, facomp2], axis=1, sort=False)
# Sort Columns in Merged DataFrames
spequant = spequant.reindex(sorted(spequant.columns), axis=1)
specomp = specomp.reindex(sorted(specomp.columns), axis=1)
claquant = claquant.reindex(sorted(claquant.columns), axis=1)
clacomp = claquant.apply(lambda x: 100 * x / x.sum(), axis=1) # get class composit
faquant = faquant.reindex(sorted(faquant.columns), axis=1)
facomp = facomp.reindex(sorted(facomp.columns), axis=1)
# Write Master data sheet
# master = pd.ExcelWriter(project+'_master.xlsx')
# spequant.to_excel(master, 'Species Quant')
# specomp.to_excel(master, 'Species Composit')
# claquant.to_excel(master, 'Class Quant')
# clacomp.to_excel(master, 'Class Composit')
# faquant.to_excel(master, 'FattyAcid Quant')
# facomp.to_excel(master, 'FattyAcid Composit')
# master.save()
# print('master sheet saved')
# Import Map
sampinfo = pd.read_excel(map1, sheet_name=0, header=1, index_col=0, na_values='.')
# Exp name dict
expname = dict(zip(sampinfo.ExpNum, sampinfo.ExpName))
sampinfo = sampinfo.drop(['ExpName'], axis=1)
sampinfo.index = list(map(qcname, list(sampinfo.index)))
sampinfo['SampleNorm'] = sampinfo['SampleNorm'].astype('float64')
# Create Normalized Sheets
# spenorm = spequant[list(map(lambda x: isinstance(x, int), spequant.index))].copy()
# #exclude sample with string name
# clanorm = claquant[list(map(lambda x: isinstance(x, int), claquant.index))].copy()
# fanorm = faquant[list(map(lambda x: isinstance(x, int), faquant.index))].copy()
spenorm = spequant.copy() # inlude all samples
clanorm = claquant.copy()
fanorm = faquant.copy()
spenorm = spenorm.divide(
40) # x0.025 to reverse /0.025 in the standard coef. /0.025 is there to simulate LWM result
clanorm = clanorm.divide(40)
fanorm = fanorm.divide(40)
spenorm = spenorm.divide(sampinfo['SampleNorm'], axis='index')
clanorm = clanorm.divide(sampinfo['SampleNorm'], axis='index')
fanorm = fanorm.divide(sampinfo['SampleNorm'], axis='index')
# Fix GroupName. If GroupName and GroupNum doesn't match, change GroupName to match GroupNum
for i in sampinfo['ExpNum'].unique().astype(int):
for ii in sampinfo.loc[sampinfo['ExpNum'] == i, 'GroupNum'].unique().astype(int):
gNamlogic = np.logical_and(sampinfo['GroupNum'] == ii, sampinfo['ExpNum'] == i)
sampinfo.loc[gNamlogic, "GroupName"] = sampinfo['GroupName'][gNamlogic].reset_index(drop=True)[0]
# for i in range(1, int(max(sampinfo['ExpNum'])) + 1):
# for ii in range(1, int(max(sampinfo.loc[sampinfo['ExpNum'] == i, 'GroupNum'])) + 1):
# gNamlogic = np.logical_and(sampinfo['GroupNum'] == ii, sampinfo['ExpNum'] == i)
# sampinfo.loc[gNamlogic, "GroupName"] = sampinfo['GroupName'][gNamlogic].reset_index(drop=True)[0]
# Merge Map, using index (Sample column in map and sample name in raw data)
spequantin = pd.concat([sampinfo, spequant], axis=1, sort=False, join='inner')
specompin = pd.concat([sampinfo, specomp], axis=1, sort=False, join='inner')
claquantin = pd.concat([sampinfo, claquant], axis=1, sort=False, join='inner')
clacompin = pd.concat([sampinfo, clacomp], axis=1, sort=False, join='inner')
faquantin = pd.concat([sampinfo, faquant], axis=1, sort=False, join='inner')
facompin = pd.concat([sampinfo, facomp], axis=1, sort=False, join='inner')
spenormin = pd.concat([sampinfo, spenorm], axis=1, sort=False, join='inner')
clanormin = | pd.concat([sampinfo, clanorm], axis=1, sort=False, join='inner') | pandas.concat |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from copy import deepcopy
from glob import glob
import sys, os
from scipy.stats import pearsonr,spearmanr
import warnings
warnings.filterwarnings("ignore")
blue, orange, green, red, purple, brown, _, _, _, _=sns.color_palette()
palette = [brown, blue, red, green,orange, purple, [0,0,0]]
def get_packages():
tmp = pd.read_csv(os.environ['ETERNABENCH_PATH']+'/eternabench/package_metadata.csv')
tmp = tmp.set_index('package')
return tmp
def get_external_Dataset_data():
return pd.read_csv(os.environ['ETERNABENCH_PATH']+'/eternabench/external_dataset_metadata.csv')
def get_palette():
return palette, ['Other','ViennaRNA','NUPACK','RNAstructure','CONTRAfold','RNAsoft', 'EternaFold']
def label_subplot(letter,fontsize=16):
plt.annotate(letter, xy=(-.25, 1.), xycoords='axes fraction', horizontalalignment='left', verticalalignment='top', fontsize=fontsize)
def reactivity_heatmap(df, ind_range=None, **kwargs):
'''Plot heatplot image of reactivities.
Input: full_df style dataframe.'''
if ind_range is None:
start,finish = 0,-1
else:
start, finish=ind_range
max_len = np.max([len(x) for x in df['reactivity'][start:finish]])
arr= []
for x in df['reactivity'][start:finish]:
arr.append(np.concatenate([x,np.zeros(max_len-len(x))]))
plt.imshow(np.array(arr), cmap='gist_heat_r',vmin=0,vmax=1.5, **kwargs)
plt.ylabel('Construct')
plt.xlabel('Sequence position')
def punpaired_heatmap(df, ind_range=None, package='vienna_2', **kwargs):
'''Plot heatplot image of predicted p(unp) values.
Input: full_df style dataframe.'''
if ind_range is None:
start,finish = 0,-1
else:
start, finish=ind_range
max_len = np.max([len(x) for x in df['reactivity'][start:finish]])
arr= []
for x in df['p_%s'% package][start:finish]:
arr.append(np.concatenate([x,np.zeros(max_len-len(x))]))
plt.imshow(np.array(arr), cmap='gist_heat_r',vmin=0,vmax=1, **kwargs)
plt.xlabel('Construct')
plt.ylabel('Sequence position')
def create_array(df_o, col1, col2, value, col1_subset=None, col2_subset=None):
'''Create array of `value` from dataframe.
First index is col1, subset, second index is col2 '''
if col1_subset is None:
col1_subset = list(df_o[col1].unique())
if col2_subset is None:
col2_subset = list(df_o[col2].unique())
arr = np.nan*np.ones([len(col1_subset),len(col2_subset)])
for i, col1_val in enumerate(col1_subset):
for j, col2_val in enumerate(col2_subset):
tmp = df_o.loc[df_o[col1]==col1_val][df_o[col2] == col2_val]
if len(tmp)>0:
assert len(tmp)==1
arr[i,j] = tmp[value].values[0]
return arr, col1_subset, col2_subset
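# Example: create_array(zscores, 'package', 'Dataset', 'pearson_zscore_by_Dataset_mean')
# returns (arr, packages, datasets) with arr[i, j] holding the metric for
# package i on dataset j, and NaN wherever that combination is missing.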
def single_barplot(df, cat, val, err, titles, **kwargs):
u = df[cat].unique()
x = np.arange(len(u))
offsets = (np.arange(len(u))-np.arange(len(u)).mean())/(len(u)+1.)
plt.barh(x,df[val].values, xerr=df[err].values, **kwargs)
plt.ylim([-0.5,len(x)-0.5])
def ranked_heatmap(zscores, metric='pearson_zscore_by_Dataset_mean', package_order=None,
dataset_field='Dataset',
dataset_order=None,
figsize=None, width_ratios=None, cbar_loc=[-0.1,0.05], fig=None, axes=None,
vmin=None,vmax=None, ext=False,dataset_labels=None):
'''Plot heatmap of packages ranked over datasets, with best at top.'''
if package_order is not None:
zsc_copy=pd.DataFrame()
for pkg in package_order:
zsc_copy= zsc_copy.append(zscores.loc[zscores.package==pkg],ignore_index=True)
zscores = deepcopy(zsc_copy)
if dataset_order is not None:
zsc_copy=pd.DataFrame()
for ds in dataset_order:
zsc_copy= zsc_copy.append(zscores.loc[zscores[dataset_field]==ds],ignore_index=True)
zscores = deepcopy(zsc_copy)
if figsize is None or width_ratios is None:
n=len(zscores[dataset_field].unique())
k = len(zscores.package.unique())
figsize=(.3*n+2,.3*k-1)
width_ratios = [.3*n, 2]
tmp, package_list, dataset_list = create_array(zscores, 'package', dataset_field, metric)
if axes is None:
gridkw = dict(width_ratios=width_ratios)
fig, (ax1, ax2) = plt.subplots(1,2, gridspec_kw=gridkw, figsize=figsize)
else:
ax1, ax2 = axes
package_data = get_packages()
palette, hue_order = get_palette()
package_titles=[]
for pkg in package_list:
try:
title = package_data.loc[pkg]['title']
package_titles.append(title)
except:
package_titles.append(pkg)
color_range = np.max(np.abs(tmp))
if vmin is None:
vmin=-1*color_range
if vmax is None:
vmax = color_range
im = ax1.imshow(tmp, cmap='seismic_r', vmin = vmin, vmax=vmax,origin='upper')
ax1.set_yticks(range(len(package_titles)))
ax1.set_yticklabels(package_titles)
ax1.set_xticks(range(len(dataset_list)))
if ext:
ext_data = get_external_Dataset_data()
ax1.set_xticklabels([ext_data.loc[ext_data['Dataset']==k].Title.values[0] for k in dataset_list],rotation=45,horizontalalignment='right')
else:
if dataset_labels is not None:
ax1.set_xticklabels(dataset_labels, rotation=45, horizontalalignment='right')
else:
ax1.set_xticklabels(dataset_list,rotation=45,horizontalalignment='right')
cbaxes = fig.add_axes([*cbar_loc, 0.15, 0.03])
fig.colorbar(im, cax = cbaxes,orientation='horizontal', label='Z-score')
#ranking = ranking.merge(package_data, on='package')
zscores = zscores.merge(package_data, on='package')
#ranking['category'] = [hue_order[i] for i in ranking['category']]
zscores['category'] = [hue_order[i] for i in zscores['category']]
sns.barplot(y='title', x=metric, hue='category', dodge=False, data=zscores,
ax=ax2, palette=palette, hue_order=hue_order, linewidth=0)
ax2.yaxis.set_ticks_position('right')
ax2.set_ylabel('')
ax2.axvline(0,color='k',linewidth=0.5,linestyle=':')
ax2.set_xlabel('Avg. Z-score')
ax2.set_xlim([vmin,vmax])
ax2.legend([],[], frameon=False)
def ranked_heatmap_w_bar_overhead(zscores, metric='pearson_zscore_by_Dataset_mean', package_order=None,
dataset_field='Dataset', figsize=(7,5), width_ratios=[2,1], cbar_loc=[-0.1,0.05],
vmin=None,vmax=None, ext=False,barplot_ymax=0.75, barplot_ylabel='Mean Corr.',RMSE=False, rainbow=False):
'''Plot heatmap of packages ranked over datasets, with best at top.'''
if RMSE:
package_order = list(reversed(package_order))
zscores[metric] *= -1
#ranking[metric] *= -1
if package_order is not None:
zsc_copy= | pd.DataFrame() | pandas.DataFrame |
import time
import sqlite3
import pandas as pd
import numpy as np
import scipy as sp
from scipy import stats
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
'''
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier, GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
'''
traindataframes = []
testDataFrame = []
max = 20
#def predict_next_day():
#return 0
def denormalize_features(features):
frames = []
for i,r in enumerate(traindataframes):
denormalized_features = []
price_col = r['Current_price']
price_col = price_col[0:max]
change_col = r['Today_price']
change_col = change_col[0:max]
#parse data and remove + sign
for j in change_col:
if '+' in j:
t = j.replace('+','')
change_col = change_col.replace(str(j),t)
for j in change_col:
change_col = change_col.replace(str(j),float(j))
for j,element in enumerate(price_col):
initial_price = element - change_col[j]
#closing_price = element
#normalized = ((closing_price/initial_price)-1)
closing_price = (features[i][j] + 1) * initial_price
denormalized_features.append(closing_price)
frames.append(denormalized_features)
features = | pd.DataFrame(frames) | pandas.DataFrame |
import pandas as pd
import re
import sys
from pymicruler.utils import util
from pymicruler.bp.BlockProcessor import BlockProcessor as BP
from pymicruler.bp.BlockInterpreter import BlockInterpreter as BI
from pymicruler.bp.NoteAnalysis import NoteAnalysis as NA
from pymicruler.bp.TaxonomyHandler import TaxonomyHandler as TH
from pymicruler.bp.ResourceCompiler import ResourceCompiler as RC
class EucastParser:
def __init__(self):
self.ires = pd.read_csv(util.Path.IRES.value)
self.note_analyser = NA()
self.b_i = BI()
self.r_c = RC()
self.all_sheets = pd.DataFrame()
self.table = pd.DataFrame()
self.guidelines = pd.DataFrame()
def run_eucast_parser(self, bp_path):
"""
Starts the parsing workflow for a given Eucast breakpoint table.
:param bp_path: Path to Eucast breakpoint table.
:type: String
:return: Parsed breakpoints
:rtype: Pandas DataFrame
"""
self.all_sheets = pd.DataFrame()
raw_table = pd.read_excel(
bp_path, sheet_name=None, na_values=[''], keep_default_na=False)
relevant_sheets = self._filter_sheets(raw_table)
for title, sheet in relevant_sheets.items():
organism = list(sheet)[0]
note_col = self._column_check(sheet)
reduced_sheet = self._column_removal(sheet, note_col)
breakpoints = self._process_sheet(reduced_sheet, organism)
self.all_sheets = | pd.concat((self.all_sheets, breakpoints)) | pandas.concat |
#!/usr/bin/env python
from bs4 import BeautifulSoup
import glob
import pandas as pd
import re
import sys
from parsers import parse_totals, parse_tests
from util import normalize_int
def is_testing_table(table):
headers = [th.text for th in table.findAll("th")]
return "Tests" in headers
# Use the historical HTML files to generate a CSV.
# Some pages cannot be handled by the parser so they are filled in manually.
def generate_csv():
print("Date,Country,DailyTestsPerformed,TotalTestsPerformed,DailyPeopleTested,TotalPeopleTested")
for file in sorted(glob.glob("data/raw/coronavirus-covid-19-number-of-cases-in-uk-*.html")):
m = re.match(r".+(\d{4}-\d{2}-\d{2})\.html", file)
date = m.group(1)
with open(file) as f:
html = f.read()
if date <= "2020-03-22":
# older pages cannot be parsed with current parser
continue
if date <= "2020-04-07":
result = parse_totals("UK", html)
print("{},UK,,,,{}".format(date, result["Tests"]))
continue
result = parse_tests("UK", html)
output_row = [date, "UK", result["DailyTestsPerformed"], result["TotalTestsPerformed"], result["DailyPeopleTested"], result["TotalPeopleTested"]]
print(",".join([str(val) for val in output_row]))
def load_owid():
use_local = False
if use_local:
file = "data/raw/owid/covid-testing-all-observations.csv"
else:
file = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/covid-testing-all-observations.csv"
df = pd.read_csv(file)
df = df[(df["Entity"] == "United Kingdom - people tested") | (df["Entity"] == "United Kingdom - tests performed")]
df = df[["Date", "Entity", "Cumulative total", "Daily change in cumulative total"]]
df.rename(columns={"Cumulative total": "Total", "Daily change in cumulative total": "Daily"}, inplace=True)
df = df.replace({"Entity": {
"United Kingdom - people tested": "PeopleTested",
"United Kingdom - tests performed": "TestsPerformed"
}})
df = df.melt(id_vars=["Date", "Entity"], value_vars=["Total", "Daily"])
df["VarEntity"] = df["variable"] + df["Entity"]
df = df.pivot(index="Date", columns="VarEntity", values="value")
return df
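# After the melt/pivot the frame is indexed by Date with four columns named
# variable+Entity (DailyPeopleTested, TotalPeopleTested, DailyTestsPerformed,
# TotalTestsPerformed), which compare() below merges against the local CSV.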
def compare():
local = pd.read_csv("data/covid-19-tests-uk.csv")
owid = load_owid()
compare_tests = pd.merge(local, owid, how="inner", on="Date", right_index=False, left_index=False, suffixes=("", "_owid"))
compare_tests.drop(columns=["Country"], inplace=True)
compare_people = compare_tests[["Date", "DailyPeopleTested", "TotalPeopleTested", "DailyPeopleTested_owid", "TotalPeopleTested_owid"]]
compare_people["DailyPeopleTestedSame"] = compare_people["DailyPeopleTested"] == compare_people["DailyPeopleTested_owid"]
compare_people["TotalPeopleTestedSame"] = compare_people["TotalPeopleTested"] == compare_people["TotalPeopleTested_owid"]
print(compare_people)
compare_tests = compare_tests[["Date", "DailyTestsPerformed", "TotalTestsPerformed", "DailyTestsPerformed_owid", "TotalTestsPerformed_owid"]]
compare_tests["DailyTestsPerformedSame"] = compare_tests["DailyTestsPerformed"] == compare_tests["DailyTestsPerformed_owid"]
compare_tests["TotalTestsPerformedSame"] = compare_tests["TotalTestsPerformed"] == compare_tests["TotalTestsPerformed_owid"]
print(compare_tests)
if __name__ == "__main__":
| pd.set_option('display.max_rows', None) | pandas.set_option |
"""
In [8]: y.value_counts(normalize=True)
Out[8]:
90 0.294725
42 0.152013
65 0.125000
16 0.117737
15 0.063073
62 0.061672
88 0.047146
92 0.030454
67 0.026504
52 0.023318
95 0.022299
6 0.019241
64 0.012997
53 0.003823
Name: target, dtype: float64
for f in sorted(glob('*.py')):
# print(f'nohup python -u {f} 0 > LOG/log_{f}.txt &')
print(f'python -u {f} 0 > LOG/log_{f}.txt')
"""
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
from glob import glob
import os
from socket import gethostname
HOSTNAME = gethostname()
from tqdm import tqdm
#from itertools import combinations
from sklearn.model_selection import KFold
from time import time, sleep
from datetime import datetime
from multiprocessing import cpu_count, Pool
import gc
# =============================================================================
# global variables
# =============================================================================
COMPETITION_NAME = 'PLAsTiCC-2018'
SPLIT_SIZE = 100
TEST_LOGS = sorted(glob('../data/test_log*.pkl'))
AUG_LOGS = sorted(glob('../data/train_log_aug*.pkl'))
GENERATE_FEATURE_SIZE = 500
GENERATE_TEST = True
GENERATE_AUG = False
IMP_FILE = 'LOG/imp_801_cv.py-2.csv'
IMP_FILE_BEST = 'LOG/imp_used_934_predict_1120-1.py.csv'
classes_gal = [6, 16, 53, 65, 92]
classes_exgal = [15, 42, 52, 62, 64, 67, 88, 90, 95]
# =============================================================================
# def
# =============================================================================
def start(fname):
global st_time
st_time = time()
print("""
#==============================================================================
# START!!! {} PID: {} time: {}
#==============================================================================
""".format( fname, os.getpid(), datetime.today() ))
send_line(f'{HOSTNAME} START {fname} time: {elapsed_minute():.2f}min')
return
def reset_time():
global st_time
st_time = time()
return
def end(fname):
print("""
#==============================================================================
# SUCCESS !!! {}
#==============================================================================
""".format(fname))
print('time: {:.2f}min'.format( elapsed_minute() ))
send_line(f'{HOSTNAME} FINISH {fname} time: {elapsed_minute():.2f}min')
return
def elapsed_minute():
return (time() - st_time)/60
def mkdir_p(path):
try:
os.stat(path)
except:
os.mkdir(path)
def to_feature(df, path):
if df.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { df.columns[df.columns.duplicated()] }')
df.reset_index(inplace=True, drop=True)
df.columns = [c.replace('/', '-').replace(' ', '-') for c in df.columns]
for c in df.columns:
df[[c]].to_feather(f'{path}_{c}.f')
return
def to_pickles(df, path, split_size=3, inplace=True):
"""
path = '../output/mydf'
wirte '../output/mydf/0.p'
'../output/mydf/1.p'
'../output/mydf/2.p'
"""
print(f'shape: {df.shape}')
if inplace==True:
df.reset_index(drop=True, inplace=True)
else:
df = df.reset_index(drop=True)
gc.collect()
mkdir_p(path)
kf = KFold(n_splits=split_size)
for i, (train_index, val_index) in enumerate(tqdm(kf.split(df))):
df.iloc[val_index].to_pickle(f'{path}/{i:03d}.p')
return
def read_pickles(path, col=None, use_tqdm=True):
if col is None:
if use_tqdm:
df = pd.concat([ pd.read_pickle(f) for f in tqdm(sorted(glob(path+'/*'))) ])
else:
print(f'reading {path}')
df = pd.concat([ pd.read_pickle(f) for f in sorted(glob(path+'/*')) ])
else:
df = pd.concat([ pd.read_pickle(f)[col] for f in tqdm(sorted(glob(path+'/*'))) ])
return df
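# Minimal round-trip sketch for the two helpers above; the output path is
# hypothetical.
def _example_pickle_roundtrip(df):
    # Shard to ../output/example_df/000.p ... then concatenate back in order.
    to_pickles(df, '../output/example_df', split_size=SPLIT_SIZE)
    return read_pickles('../output/example_df')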
#def to_feathers(df, path, split_size=3, inplace=True):
# """
# path = '../output/mydf'
#
# wirte '../output/mydf/0.f'
# '../output/mydf/1.f'
# '../output/mydf/2.f'
#
# """
# if inplace==True:
# df.reset_index(drop=True, inplace=True)
# else:
# df = df.reset_index(drop=True)
# gc.collect()
# mkdir_p(path)
#
# kf = KFold(n_splits=split_size)
# for i, (train_index, val_index) in enumerate(tqdm(kf.split(df))):
# df.iloc[val_index].to_feather(f'{path}/{i:03d}.f')
# return
#
#def read_feathers(path, col=None):
# if col is None:
# df = pd.concat([pd.read_feather(f) for f in tqdm(sorted(glob(path+'/*')))])
# else:
# df = pd.concat([pd.read_feather(f)[col] for f in tqdm(sorted(glob(path+'/*')))])
# return df
def to_pkl_gzip(df, path):
df.to_pickle(path)
os.system('rm ' + path + '.gz')
os.system('gzip ' + path)
return
def save_test_features(df):
for c in df.columns:
df[[c]].to_pickle(f'../feature/test_{c}.pkl')
return
# =============================================================================
#
# =============================================================================
def get_dummies(df):
"""
binary would be drop_first
"""
col = df.select_dtypes('O').columns.tolist()
nunique = df[col].nunique()
col_binary = nunique[nunique==2].index.tolist()
[col.remove(c) for c in col_binary]
df = pd.get_dummies(df, columns=col)
df = pd.get_dummies(df, columns=col_binary, drop_first=True)
df.columns = [c.replace(' ', '-') for c in df.columns]
return df
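# Illustrative behaviour (made-up values): a two-category object column such as
# 'sex' becomes a single 0/1 indicator (drop_first), while a column with more
# categories keeps one indicator per level, e.g.
#   get_dummies(pd.DataFrame({'sex': ['m', 'f', 'f'], 'city': ['ny', 'la', 'sf']}))
# yields columns city_la, city_ny, city_sf and sex_m.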
def reduce_mem_usage(df):
col_int8 = []
col_int16 = []
col_int32 = []
col_int64 = []
col_float16 = []
col_float32 = []
col_float64 = []
col_cat = []
for c in tqdm(df.columns, mininterval=20):
col_type = df[c].dtype
if col_type != object:
c_min = df[c].min()
c_max = df[c].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
col_int8.append(c)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
col_int16.append(c)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
col_int32.append(c)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
col_int64.append(c)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
col_float16.append(c)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
col_float32.append(c)
else:
col_float64.append(c)
else:
col_cat.append(c)
if len(col_int8)>0:
df[col_int8] = df[col_int8].astype(np.int8)
if len(col_int16)>0:
df[col_int16] = df[col_int16].astype(np.int16)
if len(col_int32)>0:
df[col_int32] = df[col_int32].astype(np.int32)
if len(col_int64)>0:
df[col_int64] = df[col_int64].astype(np.int64)
if len(col_float16)>0:
df[col_float16] = df[col_float16].astype(np.float16)
if len(col_float32)>0:
df[col_float32] = df[col_float32].astype(np.float32)
if len(col_float64)>0:
df[col_float64] = df[col_float64].astype(np.float64)
if len(col_cat)>0:
df[col_cat] = df[col_cat].astype('category')
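# Typical call pattern (path is hypothetical): downcasting happens in place and
# nothing is returned, e.g.
#   df = pd.read_csv('../data/some_table.csv')
#   reduce_mem_usage(df)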
def check_var(df, var_limit=0, sample_size=None):
if sample_size is not None:
if df.shape[0]>sample_size:
df_ = df.sample(sample_size, random_state=71)
else:
df_ = df
# raise Exception(f'df:{df.shape[0]} <= sample_size:{sample_size}')
else:
df_ = df
var = df_.var()
col_var0 = var[var<=var_limit].index
if len(col_var0)>0:
print(f'remove var<={var_limit}: {col_var0}')
return col_var0
def check_corr(df, corr_limit=1, sample_size=None):
if sample_size is not None:
if df.shape[0]>sample_size:
df_ = df.sample(sample_size, random_state=71)
else:
raise Exception(f'df:{df.shape[0]} <= sample_size:{sample_size}')
else:
df_ = df
corr = df_.corr('pearson').abs() # pearson or spearman
a, b = np.where(corr>=corr_limit)
col_corr1 = []
for a_,b_ in zip(a, b):
if a_ != b_ and a_ not in col_corr1:
# print(a_, b_)
col_corr1.append(b_)
if len(col_corr1)>0:
col_corr1 = df.iloc[:,col_corr1].columns
print(f'remove corr>={corr_limit}: {col_corr1}')
return col_corr1
def remove_feature(df, var_limit=0, corr_limit=1, sample_size=None, only_var=True):
col_var0 = check_var(df, var_limit=var_limit, sample_size=sample_size)
df.drop(col_var0, axis=1, inplace=True)
if only_var==False:
col_corr1 = check_corr(df, corr_limit=corr_limit, sample_size=sample_size)
df.drop(col_corr1, axis=1, inplace=True)
return
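# Example call (thresholds are illustrative): drop zero-variance columns and one
# member of every highly correlated pair, estimated on a 10k-row sample:
#   remove_feature(X, var_limit=0, corr_limit=0.995, sample_size=10000, only_var=False)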
def savefig_imp(imp, path, x='gain', y='feature', n=30, title='Importance'):
import matplotlib as mpl
mpl.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(11.7, 8.27)
sns.barplot(x=x, y=y, data=imp.head(n), label=x)
plt.subplots_adjust(left=.4, right=.9)
plt.title(title+' TOP{0}'.format(n), fontsize=20, alpha=0.8)
plt.savefig(path)
# =============================================================================
#
# =============================================================================
def load_train(col=None):
if col is None:
return pd.read_pickle('../data/train.pkl')
else:
return pd.read_pickle('../data/train.pkl')[col]
def load_test(col=None):
if col is None:
return pd.read_pickle('../data/test.pkl')
else:
return pd.read_pickle('../data/test.pkl')[col]
def load_target():
return | pd.read_pickle('../data/target.pkl') | pandas.read_pickle |
import numpy as np
import pandas as pd
from rdt import HyperTransformer
from rdt.transformers import OneHotEncodingTransformer
def get_input_data_with_nan():
data = pd.DataFrame({
'integer': [1, 2, 1, 3, 1],
'float': [0.1, 0.2, 0.1, np.nan, 0.1],
'categorical': ['a', 'b', np.nan, 'b', 'a'],
'bool': [False, np.nan, False, True, False],
'datetime': [
np.nan, '2010-02-01', '2010-01-01', '2010-02-01', '2010-01-01'
],
'names': ['Jon', 'Arya', 'Sansa', 'Jon', 'Robb'],
})
data['datetime'] = pd.to_datetime(data['datetime'])
return data
def get_input_data_without_nan():
data = pd.DataFrame({
'integer': [1, 2, 1, 3],
'float': [0.1, 0.2, 0.1, 0.1],
'categorical': ['a', 'b', 'b', 'a'],
'bool': [False, False, True, False],
'datetime': [
'2010-02-01', '2010-01-01', '2010-02-01', '2010-01-01'
],
'names': ['Jon', 'Arya', 'Sansa', 'Jon'],
})
data['datetime'] = pd.to_datetime(data['datetime'])
data['bool'] = data['bool'].astype('O') # boolean transformer returns O instead of bool
return data
def get_transformed_data():
return pd.DataFrame({
'integer': [1, 2, 1, 3],
'float': [0.1, 0.2, 0.1, 0.1],
'categorical': [0.75, 0.25, 0.25, 0.75],
'bool': [0.0, 0.0, 1.0, 0.0],
'datetime': [
1.2649824e+18,
1.262304e+18,
1.2649824e+18,
1.262304e+18
],
'names': [0.25, 0.875, 0.625, 0.25]
})
def get_transformed_nan_data():
return pd.DataFrame({
'integer': [1, 2, 1, 3, 1],
'float': [0.1, 0.2, 0.1, 0.125, 0.1],
'float#1': [0.0, 0.0, 0.0, 1.0, 0.0],
'categorical': [0.6, 0.2, 0.9, 0.2, 0.6],
'bool': [0.0, -1.0, 0.0, 1.0, 0.0],
'bool#1': [0.0, 1.0, 0.0, 0.0, 0.0],
'datetime': [
1.2636432e+18, 1.2649824e+18, 1.262304e+18,
1.2649824e+18, 1.262304e+18
],
'datetime#1': [1.0, 0.0, 0.0, 0.0, 0.0],
'names': [0.2, 0.9, 0.5, 0.2, 0.7],
})
def get_transformers():
return {
'integer': {
'class': 'NumericalTransformer',
'kwargs': {
'dtype': np.int64,
}
},
'float': {
'class': 'NumericalTransformer',
'kwargs': {
'dtype': np.float64,
}
},
'categorical': {
'class': 'CategoricalTransformer'
},
'bool': {
'class': 'BooleanTransformer'
},
'datetime': {
'class': 'DatetimeTransformer'
},
'names': {
'class': 'CategoricalTransformer',
},
}
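# Each entry above names an rdt transformer class for one column plus optional
# constructor kwargs; HyperTransformer(transformers) in the tests below is
# expected to instantiate that transformer per column before fit/transform.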
def test_hypertransformer_with_transformers():
data = get_input_data_without_nan()
transformers = get_transformers()
ht = HyperTransformer(transformers)
ht.fit(data)
transformed = ht.transform(data)
expected = get_transformed_data()
np.testing.assert_allclose(
transformed.sort_index(axis=1).values,
expected.sort_index(axis=1).values
)
reversed_data = ht.reverse_transform(transformed)
original_names = data.pop('names')
reversed_names = reversed_data.pop('names')
pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))
for name in original_names:
assert name not in reversed_names
def test_hypertransformer_with_transformers_nan_data():
data = get_input_data_with_nan()
transformers = get_transformers()
ht = HyperTransformer(transformers)
ht.fit(data)
transformed = ht.transform(data)
expected = get_transformed_nan_data()
np.testing.assert_allclose(
transformed.sort_index(axis=1).values,
expected.sort_index(axis=1).values
)
reversed_data = ht.reverse_transform(transformed)
original_names = data.pop('names')
reversed_names = reversed_data.pop('names')
pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))
for name in original_names:
assert name not in reversed_names
def test_hypertransformer_without_transformers():
data = get_input_data_without_nan()
ht = HyperTransformer()
ht.fit(data)
transformed = ht.transform(data)
expected = get_transformed_data()
np.testing.assert_allclose(
transformed.sort_index(axis=1).values,
expected.sort_index(axis=1).values
)
reversed_data = ht.reverse_transform(transformed)
original_names = data.pop('names')
reversed_names = reversed_data.pop('names')
pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))
for name in original_names:
assert name not in reversed_names
def test_hypertransformer_without_transformers_nan_data():
data = get_input_data_with_nan()
ht = HyperTransformer()
ht.fit(data)
transformed = ht.transform(data)
expected = get_transformed_nan_data()
np.testing.assert_allclose(
transformed.sort_index(axis=1).values,
expected.sort_index(axis=1).values
)
reversed_data = ht.reverse_transform(transformed)
original_names = data.pop('names')
reversed_names = reversed_data.pop('names')
pd.testing.assert_frame_equal(data.sort_index(axis=1), reversed_data.sort_index(axis=1))
for name in original_names:
assert name not in reversed_names
def test_single_category():
ht = HyperTransformer(transformers={
'a': OneHotEncodingTransformer()
})
data = pd.DataFrame({
'a': ['a', 'a', 'a']
})
ht.fit(data)
transformed = ht.transform(data)
reverse = ht.reverse_transform(transformed)
pd.testing.assert_frame_equal(data, reverse)
def test_dtype_category():
df = pd.DataFrame({'a': ['a', 'b', 'c']}, dtype='category')
ht = HyperTransformer()
ht.fit(df)
trans = ht.transform(df)
rever = ht.reverse_transform(trans)
pd.testing.assert_frame_equal(df, rever)
def test_empty_transformers():
"""If transformers is an empty dict, do nothing."""
data = get_input_data_without_nan()
ht = HyperTransformer(transformers={})
ht.fit(data)
transformed = ht.transform(data)
reverse = ht.reverse_transform(transformed)
pd.testing.assert_frame_equal(data, transformed)
    pd.testing.assert_frame_equal(data, reverse)
#!/usr/bin/python3
import numpy as np
import pandas as pd
# Functions to handle Input
#############################################################################################
def read_csv(file):
# simple function to read data from a file
data = pd.read_csv(file, sep=';')
return data
# Functions to handle string/value conversion
#############################################################################################
# function converts format (DD-)HH:MM:SS to seconds
def ave2sec(x):
if ( '-' in x ):
vals = x.split('-')
times = vals[1].split(':')
sec = 24*3600*int(vals[0])+3600*int(times[0])+60*int(times[1])+int(times[2])
else:
times = x.split(':')
sec = 3600*int(times[0])+60*int(times[1])+int(times[2])
return (sec)
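# Example (illustrative only): Slurm elapsed/AveCPU strings convert as follows:
#   ave2sec('12:34:56')   -> 12*3600 + 34*60 + 56 = 45296
#   ave2sec('1-00:00:30') -> 24*3600 + 30         = 86430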
def scalingref(x):
# returns reference scaling factor for MPI jobs based on 1.5 factor:
    # doubling cores should improve performance by a factor of 1.5 (or better)
if int(x) == 1:
ref = 1
else:
ref = np.power(1/1.5,np.log2(int(x)))
return ref
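# Worked example of the formula above: scalingref(n) evaluates to
# (1/1.5)**log2(n), i.e. the expected relative runtime at n cores if every
# doubling of cores yields a 1.5x speedup:
#   scalingref(1) -> 1.0
#   scalingref(2) -> 1/1.5      ~= 0.667
#   scalingref(4) -> (1/1.5)**2 ~= 0.444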
def rss2g(x):
return int(float(x[:-1]))/1024
def reqmem2g(x):
return int(float(x[:-2]))/1024
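# Example (assumption: MaxRSS/ReqMem are reported in megabytes with a one- or
# two-character unit suffix such as '2048M' or '4000Mn'): both helpers strip
# the suffix and divide by 1024 to obtain gigabytes:
#   rss2g('2048M')     -> 2.0
#   reqmem2g('4000Mn') -> ~3.91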
# Functions to handle DataFrames
#############################################################################################
def parse_df(data):
# convert multi-line DataFrame to more compact form for analysis
import datetime
from dateutil import parser
data[['id','subid']] = data.JobID.str.split('_',1, expand=True)
data.drop(['subid'],axis=1, inplace=True)
df=pd.DataFrame()
    df=data[~data['JobID'].str.contains(r"\.")]
df.rename(columns={'State': 'Parentstate'}, inplace=True)
data2=data.shift(-1).dropna(subset=['JobID'])
    df2=data2[data2['JobID'].str.contains(r"\.batch")]
data2=data.shift(-2).dropna(subset=['JobID'])
    df3=data2[data2['JobID'].str.contains(r"\.0")]
df.update(df2.MaxRSS)
df.update(df3.MaxRSS)
df.update(df2.AveCPU)
df.update(df3.AveCPU)
df=df.join(df2[['State']])
df.update(df3.State)
    # drop columns that are all nan
df.dropna(axis=1, inplace=True, how='all')
# drop rows where any element is nan (errors in the data)
df.dropna(axis=0, inplace=True, how='any')
df.reset_index(inplace=True)
df.loc[:,'State']=df.State.apply(str)
    # reduce data resolution: 5-minute accuracy instead of seconds is fine here
df['Submit']=pd.to_datetime(df['Submit']).dt.round('5min')
df['Start']=pd.to_datetime(df['Start']).dt.round('5min')
df['End']=pd.to_datetime(df['End']).dt.round('5min')
# add extra columns to df for analysis purposes
df.insert(len(df.columns),'runtime',(pd.to_datetime(df['End'])-pd.to_datetime(df['Start']))/np.timedelta64(1,'s'))
    df.insert(len(df.columns),'waittime',(pd.to_datetime(df['Start'])-pd.to_datetime(df['Submit']))/np.timedelta64(1,'s'))
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
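    # 0b00101101 sets valid bits 0, 2, 3 and 5, so rows 1 and 4 of the six
    # elements are null (matching null_count == 2 and validids below)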
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # numerical columns are upcast to float in cudf.DataFrame.to_pandas(),
    # which casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
q = q if isinstance(q, list) else [q]
assert_eq(
pdf.quantile(
q if isinstance(q, list) else [q], numeric_only=False
),
gdf.quantile(q, numeric_only=False),
)
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string name varies between runs, which can make
    # enc_with_name_arr and enc_arr come out identical; there is no reliable
    # way to pin that hash value, so an integer name is used to get a
    # constant value back from hash.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
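    # the boolean mask frame's shape (mask_shape[0] rows, mask_shape[1] columns)
    # varies relative to the 3-row / 3-column data frame being masked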
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
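    # each value should land in the same bin index as numpy.digitize, whether the
    # bins are passed as a host array or as a cudf Series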
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguious():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
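    # the float index contains NaN, so na_position is exercised alongside axis,
    # ordering, ignore_index, and in-place behaviour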
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas only added ignore_index to sort_index in v1.0, so it is emulated below
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
if ignore_index is True:
expected = expected
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
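    # assigning list-like or scalar data as a new column of a (possibly empty)
    # frame should broadcast the same way in cudf as it does in pandas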
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas( | pd.Series(data, dtype="datetime64[ns]") | pandas.Series |
import pandas as pd
import numpy as np
import json
from utils import get_next_gw
from ranked_probability_score import ranked_probability_score, match_outcome
class Baselines:
""" Baselines and dummy models """
def __init__(self, games):
"""
Args:
            games (pd.DataFrame): Finished games used for training.
"""
self.games = games.loc[:, ["score1", "score2", "team1", "team2"]]
self.games = self.games.dropna()
self.games["score1"] = self.games["score1"].astype(int)
self.games["score2"] = self.games["score2"].astype(int)
self.teams = np.sort(np.unique(self.games["team1"]))
self.league_size = len(self.teams)
def uniform(self, games):
""" Uniform outcome odds
Args:
games (pd.DataFrame): Fixtures
Returns:
(pd.DataFrame): Fixture with outcome prediction
"""
parameter_df = (
pd.DataFrame()
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='team1', right_on='team')
.merge(parameter_df, left_on='team2', right_on='team')
)
fixtures_df["home_win_p"] = 0.333
fixtures_df["draw_p"] = 0.333
fixtures_df["away_win_p"] = 0.333
return fixtures_df
def home_bias(self, games):
""" Odds biased towards home team
Args:
games (pd.DataFrame): Fixtures
Returns:
(pd.DataFrame): Fixture with outcome prediction
"""
parameter_df = (
pd.DataFrame()
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='team1', right_on='team')
.merge(parameter_df, left_on='team2', right_on='team')
)
fixtures_df["home_win_p"] = 1
fixtures_df["draw_p"] = 0
fixtures_df["away_win_p"] = 0
return fixtures_df
def draw_bias(self, games):
""" Odds biased towards draw
Args:
games (pd.DataFrame): Fixtures
Returns:
(pd.DataFrame): Fixture with outcome prediction
"""
parameter_df = (
pd.DataFrame()
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='team1', right_on='team')
.merge(parameter_df, left_on='team2', right_on='team')
)
fixtures_df["home_win_p"] = 0
fixtures_df["draw_p"] = 1
fixtures_df["away_win_p"] = 0
return fixtures_df
def away_bias(self, games):
""" Odds biased towards away team
Args:
games (pd.DataFrame): Fixtures
Returns:
(pd.DataFrame): Fixture with outcome prediction
"""
parameter_df = (
pd.DataFrame()
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='team1', right_on='team')
.merge(parameter_df, left_on='team2', right_on='team')
)
fixtures_df["home_win_p"] = 0
fixtures_df["draw_p"] = 0
fixtures_df["away_win_p"] = 1
return fixtures_df
def random_odds(self, games):
""" Random odds
Args:
games (pd.DataFrame): Fixtures
Returns:
(pd.DataFrame): Fixture with outcome prediction
"""
parameter_df = (
pd.DataFrame()
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='team1', right_on='team')
.merge(parameter_df, left_on='team2', right_on='team')
)
odds = np.random.rand(3, fixtures_df.shape[0])
fixtures_df["home_win_p"] = odds[0] / np.sum(odds, 0)
fixtures_df["draw_p"] = odds[1] / np.sum(odds, 0)
fixtures_df["away_win_p"] = odds[2] / np.sum(odds, 0)
return fixtures_df
def bookies_odds(self, games, path):
""" Bookies odds
Args:
            games (pd.DataFrame): Fixtures
            path (str): Path prefix to the folder holding the betting data
Returns:
(pd.DataFrame): Fixture with outcome prediction
"""
parameter_df = (
pd.DataFrame()
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='team1', right_on='team')
.merge(parameter_df, left_on='team2', right_on='team')
)
predictions_market = (
pd.read_csv(f'{path}data/betting/2021-22.csv')
.loc[:, ["HomeTeam", "AwayTeam", "B365H", "B365D", "B365A"]]
.rename(columns={
"HomeTeam": "team1",
"AwayTeam": "team2",
"B365H": "home_win_p",
"B365D": "draw_p",
"B365A": "away_win_p"})
)
predictions_market = predictions_market.replace({
'Brighton': 'Brighton and Hove Albion',
'Leicester': 'Leicester City',
'Leeds': 'Leeds United',
'Man City': 'Manchester City',
'Man United': 'Manchester United',
'Norwich': 'Norwich City',
'Tottenham': 'Tottenham Hotspur',
'West Ham': 'West Ham United',
'Wolves': 'Wolverhampton'
})
fixtures_df = pd.merge(
fixtures_df,
predictions_market,
left_on=['team1', 'team2'],
right_on=['team1', 'team2'])
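        # convert Bet365 decimal odds into implied probabilities and strip the
        # bookmaker's overround by normalising with the summed inverse odds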
fixtures_df['total'] = (
100 / fixtures_df['home_win_p'] + 100 /
fixtures_df['draw_p'] + 100 / fixtures_df['away_win_p'])
fixtures_df['home_win_p'] = (
100 / fixtures_df['home_win_p'] / fixtures_df['total'])
fixtures_df['away_win_p'] = (
100 / fixtures_df['away_win_p'] / fixtures_df['total'])
fixtures_df['draw_p'] = (
100 / fixtures_df['draw_p'] / fixtures_df['total'])
return fixtures_df
def bookies_favorite(self, games, path):
""" Bookies Odds biased towards the favorite
Args:
            games (pd.DataFrame): Fixtures
            path (str): Path prefix to the folder holding the betting data
Returns:
(pd.DataFrame): Fixture with outcome prediction
"""
parameter_df = (
pd.DataFrame()
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='team1', right_on='team')
.merge(parameter_df, left_on='team2', right_on='team')
)
predictions_market = (
pd.read_csv(f'{path}data/betting/2021-22.csv')
.loc[:, ["HomeTeam", "AwayTeam", "B365H", "B365D", "B365A"]]
.rename(columns={
"HomeTeam": "team1",
"AwayTeam": "team2",
"B365H": "home_win_p",
"B365D": "draw_p",
"B365A": "away_win_p"})
)
predictions_market = predictions_market.replace({
'Brighton': 'Brighton and Hove Albion',
'Leicester': 'Leicester City',
'Leeds': 'Leeds United',
'Man City': 'Manchester City',
'Man United': 'Manchester United',
'Norwich': 'Norwich City',
'Tottenham': 'Tottenham Hotspur',
'West Ham': 'West Ham United',
'Wolves': 'Wolverhampton'
})
fixtures_df = pd.merge(
fixtures_df,
predictions_market,
left_on=['team1', 'team2'],
right_on=['team1', 'team2'])
        # with decimal odds the favorite is the outcome with the *lowest* price
        fav_idx = np.argmin(
            fixtures_df[['home_win_p', 'draw_p', 'away_win_p']].values, 1)
        favorites = np.zeros(
            fixtures_df[['home_win_p', 'draw_p', 'away_win_p']].values.shape)
        favorites[np.arange(0, fav_idx.shape[0]), fav_idx] = 1
fixtures_df['home_win_p'] = favorites[:, 0]
fixtures_df['away_win_p'] = favorites[:, 2]
fixtures_df['draw_p'] = favorites[:, 1]
return fixtures_df
def evaluate(self, games, function_name, path=''):
""" Evaluate the model's prediction accuracy
Args:
            games (pd.DataFrame): Fixtures to evaluate on
function_name (string): Function to execute
path (string): Path extension to adjust to ipynb use
Returns:
pd.DataFrame: df with appended metrics
"""
if function_name == "uniform":
fixtures_df = self.uniform(games)
if function_name == "home":
fixtures_df = self.home_bias(games)
if function_name == "draw":
fixtures_df = self.draw_bias(games)
if function_name == "away":
fixtures_df = self.away_bias(games)
if function_name == "random":
fixtures_df = self.random_odds(games)
if function_name == "bookies":
fixtures_df = self.bookies_odds(games, path)
if function_name == "favorite":
fixtures_df = self.bookies_favorite(games, path)
fixtures_df["winner"] = match_outcome(fixtures_df)
fixtures_df["rps"] = fixtures_df.apply(
lambda row: ranked_probability_score(
[row["home_win_p"], row["draw_p"],
row["away_win_p"]], row["winner"]), axis=1)
return fixtures_df
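# Minimal usage sketch (hypothetical frames with team1/team2/score1/score2
# columns; the "bookies"/"favorite" baselines also need data/betting/2021-22.csv):
#   baselines = Baselines(finished_games)
#   scored = baselines.evaluate(upcoming_games, "bookies", path="")
#   print(scored["rps"].mean())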
if __name__ == "__main__":
with open('info.json') as f:
season = json.load(f)['season']
next_gw = get_next_gw()
df = pd.read_csv("data/fivethirtyeight/spi_matches.csv")
df = (
df
.loc[(df['league_id'] == 2411) | (df['league_id'] == 2412)]
)
# Get GW dates
fixtures = (
pd.read_csv("data/fpl_official/vaastav/data/2021-22/fixtures.csv")
.loc[:, ['event', 'kickoff_time']])
fixtures["kickoff_time"] = | pd.to_datetime(fixtures["kickoff_time"]) | pandas.to_datetime |
#!/usr/bin/env python
"""
Fuse csv files into one single file.
"""
## file_fuser.py
### fuse individual files into one giant file
import pandas as pd
import os
import argparse
from abc import ABCMeta, abstractmethod
class CsvFuserAbs(object, metaclass=ABCMeta):
## This object initialized from command line arguments when used in a Python script
def __init__(self):
## parse named arguments from command line
self.parser = argparse.ArgumentParser()
        self.parser.add_argument('--inputStart', '-i', help="input file name starting pattern (only files that match will be fused)", type=str)
        self.parser.add_argument('--input_index', '-idx', help="enter True if input has an index / Omit this argument if it doesn't", type=bool, default=False)
        self.parser.add_argument('--output', '-o', help="Name of final output file", type=str)
        self.parser.add_argument('--output_index', '-odx', help="enter True to output an index / Omit this argument to leave off the index", type=bool, default=False)
        self.parser.add_argument('--dir', '-d', help="input files directory", type=str, default=".")
self.args=self.parser.parse_args()
self.pyVer = "3.6.1"
        self.tmpDF = pd.DataFrame() # holds an entire input file when read in
self.tmpDF2 = pd.DataFrame() # DF to get overwritten by each file fragment
def fuse_files(self):
fileStart = self.args.inputStart # start of filenames to fuse here (assumes all files have a pattern starting with this)
outfilename = self.args.output # output file to merge files into
path = self.args.dir # directory files are found in
print("Input files will be located at: ", path)
print("args.input_index is set to:", str(type(self.args.input_index)), self.args.input_index)
print("args.output_index is set to:", str(type(self.args.output_index)), self.args.output_index)
filesInDir = os.listdir(path)
# print(type(filesInDir)) # is list
filesInDir.sort()
outDF = pd.DataFrame()
tmpDF = pd.DataFrame()
if self.args.input_index == True:
df_input_indx = 0 ## if true - set index column number to index 0
else:
df_input_indx = False
for name in filesInDir:
if name[0:len(fileStart)] == fileStart:
# print(name[0:len(fileStart)])
# print(name)
print("Adding This File To Output: ", name)
tmpDF = | pd.read_csv(name, index_col=df_input_indx) | pandas.read_csv |
import pandas as pd
import io
import lithops
from .utils import derived_from, is_series_like, M
no_default = "__no_default__"
class DataFrame:
def __init__(self, df, filepath, npartitions):
self.filepath = filepath
self.df = df
self.npartitions = npartitions
def reduction(
self,
chunk,
aggregate=None,
combine=None,
meta=no_default,
token=None,
split_every=None,
chunk_kwargs=None,
aggregate_kwargs=None,
combine_kwargs=None,
**kwargs,
):
"""Generic row-wise reductions.
Parameters
----------
chunk : callable
Function to operate on each partition. Should return a
``pandas.DataFrame``, ``pandas.Series``, or a scalar.
aggregate : callable, optional
Function to operate on the concatenated result of ``chunk``. If not
specified, defaults to ``chunk``. Used to do the final aggregation
in a tree reduction.
The input to ``aggregate`` depends on the output of ``chunk``.
If the output of ``chunk`` is a:
- scalar: Input is a Series, with one row per partition.
- Series: Input is a DataFrame, with one row per partition. Columns
are the rows in the output series.
- DataFrame: Input is a DataFrame, with one row per partition.
Columns are the columns in the output dataframes.
Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
scalar.
combine : callable, optional
Function to operate on intermediate concatenated results of
``chunk`` in a tree-reduction. If not provided, defaults to
``aggregate``. The input/output requirements should match that of
``aggregate`` described above.
$META
token : str, optional
The name to use for the output keys.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to
``aggregate``. Default is 8.
chunk_kwargs : dict, optional
Keyword arguments to pass on to ``chunk`` only.
aggregate_kwargs : dict, optional
Keyword arguments to pass on to ``aggregate`` only.
combine_kwargs : dict, optional
Keyword arguments to pass on to ``combine`` only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``combine``,
and ``aggregate``.
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
>>> ddf = dd.from_pandas(df, npartitions=4)
Count the number of rows in a DataFrame. To do this, count the number
of rows in each partition, then sum the results:
>>> res = ddf.reduction(lambda x: x.count(),
... aggregate=lambda x: x.sum())
>>> res.compute()
x 50
y 50
dtype: int64
Count the number of rows in a Series with elements greater than or
equal to a value (provided via a keyword).
>>> def count_greater(x, value=0):
... return (x >= value).sum()
>>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
... chunk_kwargs={'value': 25})
>>> res.compute()
25
Aggregate both the sum and count of a Series at the same time:
>>> def sum_and_count(x):
... return pd.Series({'count': x.count(), 'sum': x.sum()},
... index=['count', 'sum'])
>>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
>>> res.compute()
count 50
sum 1225
dtype: int64
Doing the same, but for a DataFrame. Here ``chunk`` returns a
DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
index with non-unique entries for both 'x' and 'y'. We groupby the
index, and sum each group to get the final result.
>>> def sum_and_count(x):
... return pd.DataFrame({'count': x.count(), 'sum': x.sum()},
... columns=['count', 'sum'])
>>> res = ddf.reduction(sum_and_count,
... aggregate=lambda x: x.groupby(level=0).sum())
>>> res.compute()
count sum
x 50 1225
y 50 3725
"""
if aggregate is None:
aggregate = chunk
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
chunk_kwargs["aca_chunk"] = chunk
combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}
combine_kwargs["aca_combine"] = combine
aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
aggregate_kwargs["aca_aggregate"] = aggregate
return aca(
self,
chunk=_reduction_chunk,
aggregate=_reduction_aggregate,
combine=_reduction_combine,
meta=meta,
token=token,
split_every=split_every,
chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs,
combine_kwargs=combine_kwargs,
**kwargs,
)
def _reduction_agg(self, name, axis=None, skipna=True, split_every=False, out=None):
axis = self._validate_axis(axis)
meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)
token = self._token_prefix + name
method = getattr(M, name)
if axis == 1:
result = self.map_partitions(
method, meta=meta, token=token, skipna=skipna, axis=axis
)
return handle_out(out, result)
else:
result = self.reduction(
method,
meta=meta,
token=token,
skipna=skipna,
axis=axis,
split_every=split_every,
)
if isinstance(self, DataFrame):
result.divisions = (self.columns.min(), self.columns.max())
return handle_out(out, result)
def apply(
self,
func,
axis=0,
broadcast=None,
raw=False,
reduce=None,
args=(),
meta=no_default,
result_type=None,
**kwds,
):
"""Parallel version of pandas.DataFrame.apply
This mimics the pandas version except for the following:
1. Only ``axis=1`` is supported (and must be specified explicitly).
2. The user should provide output metadata via the `meta` keyword.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column (NOT SUPPORTED)
- 1 or 'columns': apply function to each row
$META
args : tuple
Positional arguments to pass to function in addition to the array/series
Additional keyword arguments will be passed as keywords to the function
Returns
-------
applied : Series or DataFrame
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
        Apply a function row-wise, passing extra arguments in ``args`` and
        ``kwargs``:
>>> def myadd(row, a, b=1):
... return row.sum() + a + b
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5) # doctest: +SKIP
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)
See Also
--------
dask.DataFrame.map_partitions
"""
pandas_kwargs = {"axis": axis, "raw": raw, "result_type": result_type}
if axis == 0:
msg = (
"lithops.DataFrame.apply only supports axis=1\n"
" Try: df.apply(func, axis=1)"
)
raise NotImplementedError(msg)
def pandas_apply_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
            # return the chunk result so the executor can collect it
            return df.apply(func, args=args, **kwds, **pandas_kwargs)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_apply_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def all(self, axis=None, skipna=True, split_every=False, out=None):
def pandas_all_function(obj):
buf = io.BytesIO(obj.data_stream.read())
df = pd.read_csv(buf)
            return df.all(axis=axis, skipna=skipna)
fexec = lithops.FunctionExecutor()
fexec.map(pandas_all_function, [self.filepath], chunk_n=self.npartitions)
fexec.wait()
return self
@derived_from(pd.DataFrame)
def any(self, axis=None, skipna=True, split_every=False, out=None):
def pandas_any_function(obj):
buf = io.BytesIO(obj.data_stream.read())
            df = pd.read_csv(buf)
            return df.any(axis=axis, skipna=skipna)
        fexec = lithops.FunctionExecutor()
        fexec.map(pandas_any_function, [self.filepath], chunk_n=self.npartitions)
        fexec.wait()
        return self
import numpy as np
import matplotlib.pyplot as plt
import GPy
from gosafeopt import linearly_spaced_combinations
from gosafeopt import GoSafeOptPractical
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
import os
import time
'''
own example
'''
plt.rcParams.update({'font.size': 16})
import random
#warnings.filterwarnings("ignore")
random.seed(10)
np.random.seed(10)
class mod_sys:
"""
class used to define 1D nonlinear system
"""
def __init__(self,k_init=0,x_des=0,high=0.8):
self.a = 1.01
self.b = -0.2
self.k = k_init
self.state = np.array([[0]])
self.x_des = x_des
high_obs = np.array([high])
self.state_limits = list(zip(-high_obs,high_obs))
def reset(self,x_0=None):
if x_0 is None:
self.state = np.array([[0]])
else:
self.state=np.array([[x_0]])
def step(self):
v = np.random.normal(0, 1e-4)
w = np.random.normal(0, 1e-4)
obs = self.state + w
action = np.abs(self.k * (obs - self.x_des))
self.state = self.a * np.sqrt(np.abs(self.state)) + self.b * np.sqrt(action) + v
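# Illustrative sketch (an addition, not part of the original experiment): roll the
# 1-D system forward for a few steps under a fixed feedback gain and collect the
# squared state, i.e. the per-step cost used throughout this script. The gain and
# number of steps below are arbitrary demonstration values.
def _demo_mod_sys(n_steps=5, gain=2.0):
    system = mod_sys()
    system.reset()
    system.k = gain
    costs = []
    for _ in range(n_steps):
        system.step()
        costs.append(float(system.state[0][0] ** 2))
    return costs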
def generate_heat_map(n_points):
"""
Performs grid search to generate a map for the objective and constraint function
with respect to the parameters and initial states.
"""
overall_points=int(3*n_points/5)
a1 = np.linspace(-6,6,overall_points)
a2=np.linspace(-1,1,n_points-overall_points)
a=np.hstack((a1,a2))
a[::-1].sort()
x_0 = np.linspace(-0.3,0.3,n_points)
sys=mod_sys()
n_steps=1000
F=np.zeros([n_points,n_points])
G=np.ones([n_points,n_points])*np.inf
for k in range(n_points):
for s in range(n_points):
sys.reset(x_0[s])
sys.k = a[k]
F[k,s]=-sys.state**2
G[k,s]=min(G[k,s],F[k,s])
for j in range(n_steps):
sys.step()
cost=sys.state**2
F[k,s]=F[k,s]-cost[0]
G[k,s]=min(G[k,s],-cost[0])
Data=np.zeros([n_points,2])
Data[:,0]=a
Data[:,1]=x_0
return Data,F,G
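# Illustrative usage sketch (added for clarity): generate_heat_map runs a full grid
# of closed-loop rollouts, so keep n_points small when experimenting. The maps are
# wrapped in DataFrames indexed by gain (rows) and initial state (columns) purely
# for easier inspection; this helper is not used anywhere else in the script.
def _demo_heat_map(n_points=10):
    Data, F, G = generate_heat_map(n_points)
    gains, states = Data[:, 0], Data[:, 1]
    objective = pd.DataFrame(F, index=gains, columns=states)
    constraint = pd.DataFrame(G, index=gains, columns=states)
    return objective, constraint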
def plot_function(n_points=101):
"""
    Plots the objective function and safe set.
"""
a = np.linspace(-6, 6, n_points)
sys = mod_sys()
n_steps = 1000
f=np.zeros(n_points)
g=np.ones(n_points)*np.inf
for k in range(n_points):
sys.reset()
sys.k=a[k]
f[k]=-sys.state**2
g[k]=min(g[k],f[k])
for j in range(n_steps):
sys.step()
cost = sys.state ** 2
f[k] = f[k] - cost[0]
g[k] = min(g[k], -cost[0])
fig_g = plt.figure(figsize=(14, 14))
left, bottom, width, height = 0.1, 0.1, 0.8, 0.8
ax = fig_g.add_axes([left, bottom, width, height])
ax.plot(a, g, color="black", label="g")
ax.axhline(y=-0.81, color='r', linestyle='-')
ax.set_title('Constraint function')
ax.set_xlabel('a')
ax.set_ylabel('g')
ax.set_xlim([-6.5, 6.5])
name = "g.png"
fig_g.savefig(name, dpi=300)
fig_f = plt.figure(figsize=(14, 14))
left, bottom, width, height = 0.1, 0.1, 0.8, 0.8
ax = fig_f.add_axes([left, bottom, width, height])
ax.plot(a, f, color="black", label="g")
ax.axhline(y=-800, color='r', linestyle='-')
ax.set_title('objective function')
ax.set_xlabel('a')
ax.set_ylabel('f')
ax.set_xlim([-6.5, 6.5])
name = "f.png"
fig_f.savefig(name, dpi=300)
class Optimizer(object):
"""
Defines the optimizer
"""
def __init__(self,initial_k=1,high=0.9,f_min=-np.inf,num_it=1000,lengthscale=0.5,ARD=True,variance=10000,eta=0.5):
self.Fail = False
self.at_boundary = False
self.sys=mod_sys(high=high)
self.high=high
self.f_min=f_min
self.cost_bound=min(-f_min,num_it)
self.sys.k=initial_k
self.num_it=num_it
self.rollout_values=[0,0]
self.rollout_limit = 10
self.rollout_data = []
# Define bounds for parameters
bounds = [[-6, 5]]
self.horizon_cost = np.zeros(self.num_it + 1)
# Simulate to gather initial safe policy
self.simulate()
y1 = np.array([[self.rollout_values[0]]])
y1 = y1.reshape(-1, 1)
y2 = np.array([[self.rollout_values[1]]])
y2 = y2.reshape(-1, 1)
L = [lengthscale,0.2]
# GPs.
a=np.asarray([[self.sys.k]])
x = np.array([[self.sys.k, 0]])
KERNEL_f = GPy.kern.sde_Matern32(input_dim=x.shape[1], lengthscale=L, ARD=ARD,variance=variance)
gp_full1 = GPy.models.GPRegression(x, y1, noise_var=0.01**2, kernel=KERNEL_f) #noise_var=0.01**2
KERNEL_g=GPy.kern.sde_Matern32(input_dim=x.shape[1], lengthscale=L, ARD=ARD,variance=1)
gp_full2 = GPy.models.GPRegression(x, y2, noise_var=0.01 ** 2, kernel=KERNEL_g)
KERNEL_f = GPy.kern.sde_Matern32(input_dim=a.shape[1], lengthscale=lengthscale, ARD=ARD, variance=variance)
gp1 = GPy.models.GPRegression(a, y1, noise_var=0.01 ** 2, kernel=KERNEL_f)
KERNEL_g = GPy.kern.sde_Matern32(input_dim=a.shape[1], lengthscale=lengthscale * 2, ARD=ARD, variance=1)
gp2 = GPy.models.GPRegression(a, y2, noise_var=0.01 ** 2, kernel=KERNEL_g)
# Set up optimizer
L_states=L[1]
self.opt=GoSafeOptPractical(gp=[gp1, gp2],gp_full=[gp_full1,gp_full2],L_states=L_states,bounds=bounds,fmin=[f_min, -high**2],x_0=np.array([[0]]),eta_L=eta,max_S1_steps=30,max_S3_steps=10,eps=0.1,max_data_size=100,reset_size=20) #worked for maxS2_steps=100
params = np.linspace(2, 3, 2)
self.initialize_gps(params)
self.time_recorded=[]
def reset(self,x_0=None):
"""
        Reset the system to its initial state
"""
self.sys.reset(x_0)
self.Fail=False
self.at_boundary=False
self.rollout_values = [0, 0]
self.rollout_data=[]
self.horizon_cost = np.zeros(self.num_it + 1)
def initialize_gps(self,params):
for k in params:
self.sys.k=k
self.simulate()
x = np.array([[self.sys.k, 0]])
y = np.array([[self.rollout_values[0]], [self.rollout_values[1]]])
y = y.squeeze()
self.opt.add_new_data_point(x, y)
def simulate(self,opt=None,x_0=None):
"""
Simulate system
"""
self.reset(x_0)
f = -self.sys.state[0] ** 2
g = f
self.horizon_cost[0]=f
for i in range(self.num_it):
if i<self.rollout_limit:
x = np.array([[self.sys.k, self.sys.state[0][0]]])
self.rollout_data.append(x)
cost, constraint = self.step(opt)
if self.Fail:
self.rollout_values = [f, g]
print("Failed",end=" ")
break
f =f - cost[0]
self.horizon_cost[i+1]=-cost[0]
g = min(g, constraint[0])
if not self.Fail:
self.rollout_values=[f,g]
print("function values",f,g,end=" ")
def optimize(self):
"""
Perform 1 full optimization step
"""
self.opt.update_boundary_points()
start_time = time.time()
a = self.opt.optimize()
self.time_recorded.append(time.time()-start_time)
print(self.opt.criterion,a,end=" ")
self.sys.k = a
self.simulate(opt=self.opt)
x = np.array([[a.squeeze(), self.opt.x_0.squeeze()]])
y = np.array([[self.rollout_values[0]], [self.rollout_values[1]]])
y = y.squeeze()
if self.rollout_values[0]<=self.f_min or self.rollout_values[1]<=-self.high**2:
print("hit constraint", end= " ")
if not self.at_boundary:
self.add_data(y)
#self.opt.add_new_data_point(x,y)
else:
self.opt.add_boundary_points(a.reshape(1,-1))
        df2 = pd.DataFrame([[a, self.opt.x_0, self.rollout_values[0][0], self.rollout_values[1][0], self.opt.criterion, self.at_boundary, self.Fail]],
                           columns=['a', "x", "f", "g", "criteria", "boundary", "Fail"])
import re
import os
import pandas as pd
import numpy as np
def readGas(DataPath, building, building_num, write_data, datafile, floor_area):
dateparse = lambda x: pd.datetime.strptime(x, '%d-%b-%y')
    print('importing gas data from:', DataPath + building + '/Data/' + datafile + '_GasData.csv')
if building_num == 1: # Central House
df = pd.read_csv(DataPath + building + '/Data/' + datafile + '_GasData.csv', date_parser=dateparse,
header=0, index_col=0)
df = df.loc['2013-01-01':'2016-10-01'] # ['2015-09-31':'2016-10-01'] ['2012-01-24':'2016-10-01']
df = df.groupby(df.index.month).mean() # get the monthly mean over multiple years
df = pd.concat([df[9:], df[:9]]) # reorder the months to align with the submetered data...
rng = pd.date_range(start='09/2016', end='09/2017', freq='M')
df = df.set_index(rng) # set new index to align mean monthly gas data with metered electricity
df.rename(columns={df.columns[0]: 'Gas'}, inplace=True)
return df
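# Small synthetic sketch (added for clarity) of the month-realignment trick used in
# readGas above: daily placeholder values are averaged per calendar month, the rows
# are rotated by nine months, and the result is given a Sep-2016..Aug-2017 month-end
# index so it lines up with the metered electricity period.
def _demo_gas_realignment():
    daily = pd.DataFrame({'Gas': range(365)},
                         index=pd.date_range('2015-01-01', periods=365, freq='D'))
    monthly = daily.groupby(daily.index.month).mean()  # one row per calendar month
    reordered = pd.concat([monthly[9:], monthly[:9]])  # rotate so the metering year lines up
    rng = pd.date_range(start='09/2016', end='09/2017', freq='M')
    return reordered.set_index(rng)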
def readSTM(DataPathSTM, building, building_num, write_data, datafile, floor_area):
""" Short Term Monitoring """
if building_num in {0}:
dateparseSTM = lambda x: pd.datetime.strptime(x, '%d-%m-%y %H:%M')
elif building_num in {1}:
dateparseSTM = lambda x: pd.datetime.strptime(x, '%d/%m/%Y %H:%M')
if building_num in {0,1}:
df_stm = pd.read_csv(DataPathSTM + datafile + '/' + datafile + '_combined.csv', date_parser=dateparseSTM, header=0,index_col=0)
else:
df_stm = pd.DataFrame()
cols = df_stm.columns.tolist()
if building_num == 0: # MaletPlaceEngineering
cols_new = ['Server GF [GP3]', 'Lighting 2nd', 'Power 2nd', 'Lighting 3rd', 'Power 3rd', 'DB1', 'Lighting 6th',
'Power 6th', 'Power 7th', 'Lighting 7th']
for i, v in enumerate(cols):
df_stm.rename(columns={cols[i]: cols_new[i]}, inplace=True)
if building_num == 1: # CentralHouse
cols_new = ['MCP01', 'B2', 'PV', '3A', '3D', 'B41']
for i, v in enumerate(cols):
df_stm.rename(columns={cols[i]: cols_new[i]}, inplace=True)
""" Manipulate """
    # Average and standard deviation profiles were created for weekdays and weekend days from the short-term monitoring. The 2-hourly metered kWh values are spread over 15-minute intervals (hence the division by 8 below) and linearly interpolated.
df_stm = df_stm.divide(8) # because it's kWh each value needs to be divided by 8 if we go from 2h to 15min frequency
df_stm = df_stm[~df_stm.index.duplicated(keep='first')]
df_stm = df_stm.reindex(pd.date_range(start=df_stm.index.min(), end=df_stm.index.max(), freq='15Min'))
df_stm = df_stm.interpolate(method='linear')
return df_stm
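# Minimal sketch (added for clarity) of the resampling logic in readSTM, on synthetic
# data: a 2-hourly kWh series is spread over 15-minute intervals, so each value is
# divided by 8 before reindexing and linear interpolation. The column name and the
# values are placeholders, not actual short-term-monitoring channels.
def _demo_stm_resample():
    idx = pd.date_range('2017-01-01', periods=4, freq='2H')
    df = pd.DataFrame({'channel': [8.0, 16.0, 8.0, 4.0]}, index=idx)
    df = df.divide(8)
    df = df.reindex(pd.date_range(start=idx.min(), end=idx.max(), freq='15Min'))
    return df.interpolate(method='linear')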
def readSubmetering(DataPath, building, building_num, building_abr, write_data, datafile, df_stm, floor_area):
print(building_abr)
print('importing submetering data from:', DataPath + building + '/Data/' + datafile + '_SubmeteringData.csv')
if building_abr == 'MPEB': # Malet Place Engineering
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%b-%d %H:%M:%S.000')
df = pd.read_csv(DataPath + building + '/Data/' + datafile + '_SubmeteringData.csv', date_parser=dateparse,
header=0, index_col=0)
# Check if there are duplicate index values (of which there are in CH data) and remove them...
df = df[~df.index.duplicated(keep='first')]
# There are missing indices in the data, reindex the missing indices of which there are only a few and backfill them
df = df.reindex(pd.date_range(df.index.min(), df.index.max(), freq='15Min'), method='backfill')
df_realweather = pd.DataFrame()
cols = df.columns.tolist()
df = df.loc['2016-09-01 00:15:00':'2017-08-31'] # ['2016-09-01':'2017-04-30']
cols_new = ['B10', 'B11', 'B12', 'B14', 'B15', 'B16', 'B17', 'B8', 'B9', 'BB4', 'BB3',
'CH1', 'CH2', 'DB5MS', 'GP2', 'Dynamo', 'Fire Lift', 'Lift Panel',
'Lift P1', 'LV1', 'LV2', 'LV3', 'MCCB03', 'MCCB01', 'BB2', 'BB1']
# print(pd.DataFrame([df.columns, cols_new]).T)
# stm cols [server excluded...,'2L','2P','3L','3P','DB1','6L','6P','7P','7L']
for i, v in enumerate(cols):
df.rename(columns={cols[i]: cols_new[i]}, inplace=True)
df_stm = pd.concat([df_stm], axis=1, join_axes=[df.index]) # set the short term monitoring to the same axis
df = pd.concat([df, df_stm], axis=1)
# df = df.convert_objects(convert_numeric=True).fillna(0)
df[df < 0] = 0 # set negative values (LV3) to 0
df_MPEB = pd.concat([df[['LV1', 'LV2']]], axis=1)
df_MPEB = df_MPEB.sum(axis=1)
df_MPEB = df_MPEB.sum(axis=0)
print('df_MPEB total kWh/m2a:', df_MPEB / floor_area)
# real LV metered
df_LV1_real = df['LV1']
df_LV2_real = df['LV2']
df_mains = pd.DataFrame(pd.concat([df_LV1_real, df_LV2_real], axis=1).sum(axis=1), columns=['Mains'])
df_workshops = pd.DataFrame(pd.concat([df[['BB4', 'BB3', 'LV3', 'GP2']]], axis=1).sum(axis=1),
columns=['Workshops'])
df_lifts = pd.DataFrame(pd.concat([df[['Fire Lift', 'Lift Panel']]], axis=1).sum(axis=1), columns=['Lifts'])
df_mech = pd.DataFrame(pd.concat([df[['MCCB03', 'Dynamo', 'MCCB01']]], axis=1).sum(axis=1), columns=['Systems'])
df_chillers = pd.DataFrame(pd.concat([df[['CH1', 'CH2']]]).sum(axis=1), columns=['Chillers'])
# Lighting and Power
df_BB2 = pd.DataFrame(pd.concat([df[['Lighting 6th', 'Power 6th', 'Lighting 7th', 'Power 7th']].sum(axis=1),
pd.DataFrame(df[['Lighting 7th', 'Power 6th']].sum(axis=1) * 3)], axis=1).sum(
axis=1), columns=['BB2 L&P'])
df_BB1 = pd.DataFrame(pd.concat([df[['Lighting 2nd', 'Power 2nd', 'Lighting 3rd', 'Power 3rd']].sum(axis=1),
pd.DataFrame(df[['Lighting 6th', 'Power 6th']].sum(axis=1) * 2)], axis=1).sum(
axis=1), columns=['BB1 L&P'])
df_BB1_surplus = df['BB1'] - df['DB1'] - df['Server GF [GP3]'] - df_BB1['BB1 L&P']
df_BB2_surplus = df['BB2'] - df_BB2['BB2 L&P']
print('Busbar 1')
print('BB1', df['BB1'].sum(axis=0) / floor_area)
print('DB1', df['DB1'].sum(axis=0) / floor_area)
print('GP3', df['Server GF [GP3]'].sum(axis=0) / floor_area)
print('BB1 L&P', df_BB1['BB1 L&P'].sum(axis=0) / floor_area)
print('BB1remaining', df_BB1_surplus.sum(axis=0) / floor_area)
print('LP on 6th', pd.DataFrame(df[['Lighting 6th', 'Power 6th']]).sum(axis=1).sum(axis=0) / floor_area)
print('LP on 2 and 3rd',
df[['Lighting 2nd', 'Power 2nd', 'Lighting 3rd', 'Power 3rd']].sum(axis=1).sum(axis=0) / floor_area)
print('Busbar 2')
print('BB2', df['BB2'].sum(axis=0) / floor_area)
print('BB2 L&P', df_BB2['BB2 L&P'].sum(axis=0) / floor_area)
print('BB2remaining', df_BB2_surplus.sum(axis=0) / floor_area)
print(((df_BB1_surplus.sum(axis=0) / floor_area) + (df_BB2_surplus.sum(axis=0) / floor_area)) / (
df['DB1'].sum(axis=0) / floor_area))
print(((df_BB1_surplus.sum(axis=0) / floor_area) + (df_BB2_surplus.sum(axis=0) / floor_area)) / df['DB1'].sum(
axis=0) / floor_area)
df_lp = pd.DataFrame(pd.concat([df_BB1['BB1 L&P'], df_BB2['BB2 L&P']], axis=1).sum(axis=1),
columns=['floors L&P'])
surplus_basedonDB1 = df['DB1'] * ((((df_BB1_surplus.sum(axis=0) / floor_area) + (
df_BB2_surplus.sum(axis=0) / floor_area)) / (df['DB1'].sum(axis=0) / floor_area)) / 10)
# keep within 20% of the mean.
surplus_basedonDB1[
surplus_basedonDB1 < surplus_basedonDB1.mean() - 0.2 * surplus_basedonDB1.mean()] = surplus_basedonDB1.mean() # remove negative values..
surplus_basedonDB1[
surplus_basedonDB1 > surplus_basedonDB1.mean() + 0.2 * surplus_basedonDB1.mean()] = surplus_basedonDB1.mean() # remove negative values..
df_BB1and2 = pd.DataFrame(
df[['BB1', 'BB2']].sum(axis=1) - surplus_basedonDB1 - df['Server GF [GP3]'] - df['DB1'], columns=['L&P'])
# scaled_daily(df_BB1and2.resample('30Min').sum(), building_label='MPEB', building_abr='MPEB', day_type='three', scale=False, time_interval='30Min')
surplus = pd.concat([df_BB1_surplus + df_BB2_surplus], axis=1)
# determine server based on difference between LV2 and dissaggregated LV2.
df_LV2_aggregate = pd.concat([df[['BB1', 'BB2', 'CH2', 'MCCB01', 'GP2']]],
axis=1) # LV2, missing Fire alam and DB409 (big server)
df_LV2_aggregate = df_LV2_aggregate.sum(axis=1)
df_bigserver = pd.DataFrame(df_LV2_real - df_LV2_aggregate, columns=[
'DB409']) # difference between LV2 and LV2 dissaggregated is the difference, which should be the server.
df_bigserver[df_bigserver < 0] = 0 # remove negative values...
df_bigserver = pd.DataFrame(
pd.concat([df_bigserver, surplus_basedonDB1, df['Server GF [GP3]'], df['DB1']], axis=1).sum(axis=1),
columns=['DB409'])
print(df_bigserver.sum(axis=0) / floor_area, 'kWh/m2a')
df_floorsLP = pd.DataFrame(pd.concat([df[['BB1', 'BB2']]], axis=1).sum(axis=1), columns=['L&P'])
df_floorsLP['L&P'] = df_floorsLP['L&P'] - df['Server GF [GP3]']
df_floorsLP = pd.DataFrame(pd.concat([df_BB1, df_BB2], axis=1).sum(axis=1), columns=['L&P'])
df_servers = pd.DataFrame(pd.concat([df_bigserver, df[['Server GF [GP3]']]], axis=1).sum(axis=1),
columns=['Servers'])
print("Average kWh per day for the server DB409 = " + str(df_bigserver.mean()))
df_LVL1 = pd.concat([df_BB1and2, df_chillers, df_mech, df_servers, df_workshops],
axis=1) # LV1, missing LV1A, PF LV1
print('Workshops', df_workshops.values.sum() / floor_area, 'servers', df_servers.values.sum() / floor_area,
'Lifts', df_lifts.values.sum() / floor_area)
print('lift', df['Lift P1'].values.sum() / floor_area)
print('GP2', df['GP2'].values.sum() / floor_area)
print('DB5MS', df['DB5MS'].values.sum() / floor_area)
# diff between BB3 aggregated and separate
df_BB3 = df[['B9', 'B10', 'B14', 'B15', 'B8']] # these combined form Busbar-2 (BB3)
df_BB4 = df[['B12', 'B16', 'B17', 'B11']] # these combined form Busbar-1 (BB4) # excludes B13
df_BB3and4 = pd.concat([df_BB3, df_BB4], axis=1)
df_BB3and4 = df_BB3and4.sum(axis=1)
df_BB3and4real = pd.concat([df['BB2'], df['BB1']], axis=1)
df = pd.concat([df, df_bigserver], axis=1)
if building_abr == 'CH': # CentralHouse
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%b-%d %H:%M:%S.000')
df = pd.read_csv(DataPath + building + '/Data/' + datafile + '_SubmeteringData.csv', date_parser=dateparse,
header=0, index_col=0)
# Check if there are duplicate index values (of which there are in CH data) and remove them...
df = df[~df.index.duplicated(keep='first')]
# There are missing indices in the data, reindex the missing indices of which there are only a few and backfill them
df = df.reindex(pd.date_range(df.index.min(), df.index.max(), freq='15Min'), method='backfill')
df_realweather = pd.DataFrame()
df = df.loc['2016-09-01':'2017-08-31']
# Naming
# cols_new = ['BB2', 'LIFT1', 'LIFT2', 'LIFT3', 'B1', 'B4', 'BB1', 'LIFT4', 'DB21', 'R1', 'Server [B5]', 'R2',
# 'G2', 'G1', 'B31', 'B32', 'B44', 'B52', 'B54', 'B53',
# 'B62', 'B64', 'B11', 'B12', 'B21', 'B22', 'B43', 'B42', 'B51',
# 'B61', 'MP1']
cols = df.columns.tolist()
# STM ['MCP01', 'B2', 'PV', '3A', '3D', 'B41']
cols_new = ['BB2', 'LIFT1', 'LIFT2', 'LIFT3', 'B1', 'B4', 'BB1', 'LIFT4', 'DB21', 'R1', 'Server', 'R2',
'G2', 'G1', 'B31', 'B32', 'B44', 'B52', 'B54', 'B53', 'B62', 'B64', 'B11', 'B12', 'B21', 'B22',
'B43', 'B42', 'B51',
'B61', 'MP1']
for i, v in enumerate(cols):
df.rename(columns={cols[i]: cols_new[i]}, inplace=True)
df_m = df.resample('M').sum()
df_m_sum = df_m.mean(axis=0)
# combine B1 and B2 and B4 (boiler house L&P) as Basement L&P
df_basementLP = pd.concat([df[['B1', 'B4']], df_stm[['B2']]], axis=1, join_axes=[df.index])
df_basementLP = pd.DataFrame(df_basementLP.sum(axis=1), columns=['L&P Basement'])
# combine L&P per floor
df_groundLP = df[['G1', 'G2']]
df_groundLP = pd.DataFrame(df_groundLP.sum(axis=1), columns=['L&P Ground floor'])
# first floor lighting and power
df_firstLP = df[['B12', 'B11']]
df_firstLP = pd.DataFrame(df_firstLP.sum(axis=1), columns=['L&P 1st floor'])
# second floor lighting and power
df_secondLP = df[['B21', 'B22']]
df_secondLP = pd.DataFrame(df_secondLP.sum(axis=1), columns=['L&P 2nd floor'])
# third floor lighting and power
df_thirdLP = pd.concat([df[['B31', 'B32']], df_stm[['3A', '3D']]], axis=1, join_axes=[df.index])
df_thirdLP = pd.DataFrame(df_thirdLP.sum(axis=1), columns=['L&P 3rd floor'])
# fourth floor lighting and power
df_fourthLP = pd.concat([df[['B42', 'B43', 'B44']], df_stm[['B41']]], axis=1, join_axes=[df.index])
df_fourthLP = pd.DataFrame(df_fourthLP.sum(axis=1), columns=['L&P 4th floor']) # [B41, B42]
# fifth floor lighting and power
df_fifthLP = df[['B51', 'B53', 'B54']]
df_fifthLP = pd.DataFrame(df_fifthLP.sum(axis=1), columns=['L&P 5th floor'])
# sixth floor lighting and power
df_sixthLP = df[['B61', 'B62']]
df_sixthLP = pd.DataFrame(df_sixthLP.sum(axis=1), columns=['L&P 6th floor'])
# combine Lifts 1-4
df_lifts = pd.DataFrame(df[['LIFT1', 'LIFT2', 'LIFT3', 'LIFT4']].sum(axis=1), columns=['Lifts'])
# combine R1, R2 and MCP01 as systems
df_mech = pd.concat([df[['R1', 'R2']], df_stm[['MCP01']]], axis=1, join_axes=[df.index])
df_mech = pd.DataFrame(df_mech.sum(axis=1), columns=['Systems'])
df_BBs = pd.concat([df[['BB1', 'BB2']], df_basementLP], axis=1, join_axes=[df.index])
df_BBs = df_BBs.sum(axis=1)
df_BBs = pd.DataFrame(df_BBs)
df_BBs.rename(columns={df_BBs.columns[0]: 'L&P'}, inplace=True) # R1, R2', MCP01
df_BB1 = df[['G1', 'B11', 'B21', 'B61', 'B42']]
df_BB2 = pd.concat([df[['G2', 'B12', 'B22', 'B51', 'B62']], df_stm[['B41']]], axis=1, join_axes=[df.index])
df_lighting = pd.concat([df[['B31', 'B62']], df_stm[['B41']]], axis=1, join_axes=[df.index]) # is this correct?
df_MP1_real = df['MP1']
df_floorsLP = pd.concat(
[df_basementLP, df_groundLP, df_firstLP, df_secondLP, df_thirdLP, df_fourthLP, df_fifthLP, df_sixthLP],
axis=1) # B3 is not measured... (should be small)
df_floorsLP_sum = pd.concat([df_floorsLP, df[['Server']], df_lifts], axis=1)
df_floorsLP_sum = pd.DataFrame(df_floorsLP_sum.sum(axis=1), columns=['L&P'])
df_LVL1 = pd.concat([df_floorsLP_sum, df_mech], axis=1,
join_axes=[df.index]) # B3 is not measured... (should be small)
df_stm = pd.concat([df_stm], axis=1, join_axes=[df.index])
df_mains = pd.DataFrame(
pd.concat([df[['MP1']], df[['B31', 'B32']], df_stm[['3A', '3D']]], axis=1, join_axes=[df.index]).sum(
axis=1), columns=['Mains'])
if building_abr == '17': # 17
dateparse = lambda x: pd.datetime.strptime(x, '%d-%m-%y %H:%M')
# for pilot study
# df = pd.read_csv(DataPath + building + '/Data/17_actual_clean.csv', date_parser=dateparse, header=0, index_col=0)
df = pd.read_csv(DataPath + building + '/Data/17_SubmeteringData.csv', date_parser=dateparse, header=0,
index_col=0)
# Check if there are duplicate index values (of which there are in CH data) and remove them...
df = df[~df.index.duplicated(keep='first')]
# There are missing indices in the data, reindex the missing indices of which there are only a few and backfill them
df = df.reindex(pd.date_range(df.index.min(), df.index.max(), freq='30Min'), method='backfill')
df_realweather = pd.DataFrame()
df = df[:-1]
cols = df.columns.tolist()
print(df.columns)
cols_new = ['Gas', 'B_Power', 'B_Lights', 'B_AC', 'Print', 'Canteen', 'GF_Lights', 'Servers', 'GF_Power',
'1st_Lights', 'GF_AC', 'Lift', '1st_Power', '2nd_Lights', '2nd_Power']
for i, v in enumerate(cols):
df.rename(columns={cols[i]: cols_new[i]}, inplace=True)
# ['Gas', 'B_Power', 'B_Lights', 'B_AC', 'Print', 'Canteen', 'Server', 'GF_Power', 'GF_Lights', 'GF_AC', 'Lift', '1st_Power', '1st_Lights', '2nd_Power', '2nd_Lights']
df_lights = pd.concat([df[['B_Lights', 'GF_Lights', '1st_Lights', '2nd_Lights']]], axis=1)
df_lights = pd.DataFrame(df_lights.sum(axis=1), columns=['Lights'])
df_mech = pd.concat([df[['B_AC', 'GF_AC']]], axis=1)
df_mech = pd.DataFrame(df_mech.sum(axis=1), columns=['AC'])
df_power = pd.concat([df[['B_Power', 'GF_Power', '1st_Power', '2nd_Power']]], axis=1)
df_power = pd.DataFrame(df_power.sum(axis=1), columns=['Power'])
# L&P
df_floorsLP = pd.concat([df[['B_Power', 'B_Lights', 'GF_Power', 'GF_Lights', '1st_Power', '1st_Lights',
'2nd_Power', '2nd_Lights']]], axis=1) # B3 is not measured... (should be small)
df_LVL1 = pd.concat([df_lights, df_power, df[['Gas', 'Servers', 'Canteen', 'Print']]], axis=1)
df_mains = pd.DataFrame(
            pd.concat([df_lights, df_power, df[['Servers', 'Canteen', 'Print']]], axis=1).sum(axis=1), columns=['Mains'])
#!usr/bin/env python3
# -*- coding:utf-8 -*-
# @time : 2021/2/19 16:24
# @author : <NAME>
import os
import pickle
import warnings
from functools import reduce, partial
# from pathos.multiprocessing import ProcessPool as Pool
from multiprocessing import Pool
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
warnings.filterwarnings('ignore')
class xgb_model():
def __init__(self, model_file, ifps, labels):
# get file
self.model_file = model_file
# train model
if not os.path.exists(self.model_file):
            print('model file does not exist; training a new one')
# train
print('start training......')
clf = self.train_xgb(ifps, labels)
# save
self.save_model(clf)
# load model
print('load model......')
self.model = self.load_model()
def train_xgb(self, ifps, labels, hyper_rounds=20):
from sklearn.model_selection import cross_val_score, StratifiedKFold
from xgboost import XGBClassifier
from hyperopt import hp, fmin, tpe
        # Hyperparameter optimization
        def model(hyper_parameter):  # objective function for the hyperparameter search
clf = XGBClassifier(**hyper_parameter,
n_jobs=28,
random_state=42)
e = cross_val_score(clf, ifps, labels, cv=StratifiedKFold(n_splits=5, shuffle=True, random_state=42),
n_jobs=1,
scoring='f1').mean()
            print(f'Mean F1 score: {e:.3f}')
return -e
hyper_parameter = {'n_estimators': hp.choice('n_estimators', range(100, 301, 10)),
'max_depth': hp.choice('max_depth', range(3, 11)),
'learning_rate': hp.loguniform('learning_rate', 1e-8, 0.1),
                           'reg_lambda': hp.loguniform('reg_lambda', 0.5, 3)}  # hyperparameters to optimize
        # Create the corresponding candidate lists (hp.choice returns indices into these)
estimators = [i for i in range(100, 301, 10)]
depth = [i for i in range(3, 11)]
        # Run the search
        best = fmin(model, hyper_parameter, algo=tpe.suggest, max_evals=hyper_rounds,
                    rstate=np.random.RandomState(42))  # minimize model() over max_evals evaluations
        # Train the final classifier with the selected hyperparameters
clf = XGBClassifier(n_estimators=estimators[best['n_estimators']],
max_depth=depth[best['max_depth']],
learning_rate=best['learning_rate'],
reg_lambda=best['reg_lambda'],
                            n_jobs=-1, random_state=42)  # define the classifier
clf.fit(X=ifps, y=labels)
return clf
def save_model(self, clf):
with open(self.model_file, 'wb') as f:
pickle.dump(clf, f)
def load_model(self):
with open(self.model_file, 'rb') as f:
return pickle.load(f)
def predict(self, x):
model = self.load_model()
pred_y = model.predict(x)
pred_proba = model.predict_proba(x)
return pred_y, pred_proba.T[1]
def metric(self, pred_proba, pred_y, y_true):
from sklearn.metrics import f1_score, accuracy_score, roc_auc_score, recall_score, precision_score, confusion_matrix
f1_ = f1_score(pred_y, y_true)
acc = accuracy_score(pred_y, y_true)
roc_auc_ = roc_auc_score(y_true, pred_proba)
recall = recall_score(y_true=y_true, y_pred=pred_y)
precision = precision_score(y_true=y_true, y_pred=pred_y)
tn, fp, fn, tp = confusion_matrix(y_true, pred_y).ravel()
print(
f'---------------metric---------------\nF1_score:{f1_:.3f} || Accuracy:{acc:.3f} || ROC_AUC:{roc_auc_:.3f}|| Recall:{recall:.3f} || Precision:{precision:.3f}\nTN:{tn:.3f} || FP:{fp:.3f} || FN:{fn:.3f}|| TP:{tp:.3f}')
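# Illustrative usage sketch (added for demonstration; the fingerprints, labels and
# model file name below are synthetic placeholders, not the real dataset): train or
# load a cached model, predict on a held-out split and print the metrics.
def _demo_xgb_model():
    rng = np.random.RandomState(42)
    ifps = rng.randint(0, 2, size=(200, 64))     # fake binary fingerprints
    labels = rng.randint(0, 2, size=200)         # fake activity labels
    x_tr, x_te, y_tr, y_te = train_test_split(ifps, labels, test_size=0.2, random_state=42)
    model = xgb_model('demo_xgb_model.pkl', x_tr, y_tr)
    pred_y, pred_proba = model.predict(x_te)
    model.metric(pred_proba, pred_y, y_te)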
def get_des_label(csv_file, des_type):
# read_file
df = pd.read_csv(csv_file, encoding='utf-8').dropna()
# add label
df_ac = df[df.iloc[:, 0].str.contains('_0')]
df_ac['label'] = np.ones((len(df_ac)))
df_inac = df[~df.iloc[:, 0].str.contains('_0')]
df_inac['label'] = np.zeros((len(df_inac)))
# merge
df = df_ac.append(df_inac, sort=False)
# get data
if des_type == 'sp':
df = df.iloc[:, [0] + list(range(7, 19))]
return df
def get_top_n(molecule_name, df, top_n=50, ascending=True):
df = df[df.iloc[:, 0].str.startswith(f'{molecule_name}_')]
df_seed = | pd.DataFrame(df.iloc[0, :]) | pandas.DataFrame |
from non_feature_based.opencrowd_gibbs import sampler
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score,roc_auc_score
def gete2t(train_size,truth_labels):
e2t = {}
for example in range(train_size):
e2t[example] = truth_labels[example]
return e2t
def run_experiment(epochs, file_out, value_range, value_name, param):
    ground_truth = pd.factorize(pd.read_csv(param['labels_file'], sep=",")
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort=sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
        left = concat([ts1, ts2], join="outer", axis=1)
        right = concat([ts2, ts1], join="outer", axis=1)
        assert len(left) == len(right)
import pendulum as pdl
import sys
sys.path.append(".")
# the memoization-related library
import loguru
import itertools
import portion
import klepto.keymaps
import CacheIntervals as ci
from CacheIntervals.utils import flatten
from CacheIntervals.utils import pdl2pd, pd2pdl
from CacheIntervals.utils import Timer
from CacheIntervals.Intervals import pd2po, po2pd
from CacheIntervals.RecordInterval import RecordIntervals, RecordIntervalsPandas
class QueryRecorder:
'''
A helper class
'''
pass
class MemoizationWithIntervals(object):
'''
The purpose of this class is to optimise
the number of call to a function retrieving
possibly disjoint intervals:
- do standard caching for a given function
- additively call for a date posterior to one
already cached is supposed to yield a pandas
Frame which can be obtained by concatenating
the cached result and a -- hopefully much --
smaller query
Maintains a list of intervals that have been
called.
With a new interval:
-
'''
keymapper = klepto.keymaps.stringmap(typed=False, flat=False)
def __init__(self,
pos_args=None,
names_kwarg=None,
classrecorder=RecordIntervalsPandas,
aggregation=lambda listdfs: pd.concat(listdfs, axis=0),
debug=False,
# memoization=klepto.lru_cache(
# cache=klepto.archives.hdf_archive(
# f'{pdl.today().to_date_string()}_memoization.hdf5'),
# keymap=keymapper),
memoization=klepto.lru_cache(
cache=klepto.archives.dict_archive(),
keymap=keymapper),
**kwargs):
'''
:param pos_args: the indices of the positional
arguments that will be handled as intervals
:param names_kwarg: the name of the named parameters
that will be handled as intervals
:param classrecorder: the interval recorder type
we want to use
:param memoization: a memoization algorithm
'''
# A dictionary of positional arguments indices
# that are intervals
self.argsi = {}
self.kwargsi = {}
# if pos_args is not None:
# for posarg in pos_args:
# self.argsi[posarg] = classrecorder(**kwargs)
self.pos_args_itvl = pos_args if pos_args is not None else []
#print(self.args)
# if names_kwarg is not None:
# for namedarg in names_kwarg:
# self.kwargsi[namedarg] = classrecorder(**kwargs)
self.names_kwargs_itvl = names_kwarg if names_kwarg is not None else {}
#print(self.kwargs)
self.memoization = memoization
self.aggregation = aggregation
self.debugQ = debug
self.argsdflt = None
self.kwargsdflt = None
self.time_last_call = pdl.today()
self.classrecorder = classrecorder
self.kwargsrecorder = kwargs
self.argssolver = None
self.query_recorder = QueryRecorder()
def __call__(self, f):
'''
The interval memoization leads to several calls to the
standard memoised function and generates a list of return values.
The aggregation is needed for the doubly lazy
function to have the same signature as the
To access, the underlying memoized function pass
get_function_cachedQ=True to the kwargs of the
overloaded call (not of this function
:param f: the function to memoize
:return: the wrapper to the memoized function
'''
if self.argssolver is None:
self.argssolver = ci.Functions.ArgsSolver(f, split_args_kwargsQ=True)
@self.memoization
def f_cached(*args, **kwargs):
'''
The cached function is used for a double purpose:
1. for standard calls, will act as the memoised function in a traditional way
            2. Additionally, when passed parameters of type QueryRecorder, it will create
or retrieve the interval recorders associated with the values of
non-interval parameters.
In this context, we use the cached function as we would a dictionary.
'''
QueryRecorderQ = False
args_new = []
kwargs_new = {}
'''
check whether this is a standard call to the user function
or a request for the interval recorders
'''
for i,arg in enumerate(args):
if isinstance(arg, QueryRecorder):
args_new.append(self.classrecorder(**self.kwargsrecorder))
QueryRecorderQ = True
else:
args_new.append(args[i])
for name in kwargs:
if isinstance(kwargs[name], QueryRecorder):
kwargs_new[name] = self.classrecorder(**self.kwargsrecorder)
QueryRecorderQ = True
else:
kwargs_new[name] = kwargs[name]
if QueryRecorderQ:
return args_new, kwargs_new
return f(*args, **kwargs)
def wrapper(*args, **kwargs):
if kwargs.get('get_function_cachedQ', False):
return f_cached
#loguru.logger.debug(f'function passed: {f_cached}')
loguru.logger.debug(f'args passed: {args}')
loguru.logger.debug(f'kwargs passed: {kwargs}')
# First pass: resolve the recorders
dargs_exp, kwargs_exp = self.argssolver(*args, **kwargs)
# Intervals are identified by position and keyword name
# 1. First get the interval recorders
args_exp = list(dargs_exp.values())
args_exp_copy = args_exp.copy()
kwargs_exp_copy = kwargs_exp.copy()
for i in self.pos_args_itvl:
args_exp_copy[i] = self.query_recorder
for name in self.names_kwargs_itvl:
kwargs_exp_copy[name] = self.query_recorder
args_with_ri, kwargs_with_ri = f_cached(*args_exp_copy, **kwargs_exp_copy)
# 2. Now get the the actual list of intervals
for i in self.pos_args_itvl:
# reuse args_exp_copy to store the list
args_exp_copy[i] = args_with_ri[i](args_exp[i])
for name in self.names_kwargs_itvl:
# reuse kwargs_exp_copy to store the list
kwargs_exp_copy[name] = kwargs_with_ri[name](kwargs_exp[name])
            '''3. Then generate all combinations of parameters
3.a - args'''
ns_args = range(len(args_exp))
lists_possible_args = [[args_exp[i]] if i not in self.pos_args_itvl else args_exp_copy[i] for i in ns_args]
# Take the cartesian product of these
calls_args = list( map(list,itertools.product(*lists_possible_args)))
'''3.b kwargs'''
#kwargs_exp_vals = kwargs_exp_copy.values()
names_kwargs = list(kwargs_exp_copy.keys())
lists_possible_kwargs = [[kwargs_exp[name]] if name not in self.names_kwargs_itvl
else kwargs_exp_copy[name] for name in names_kwargs]
calls_kwargs = list(map(lambda l: dict(zip(names_kwargs,l)), itertools.product(*lists_possible_kwargs)))
calls = list(itertools.product(calls_args, calls_kwargs))
if self.debugQ:
results = []
for call in calls:
with Timer() as timer:
results.append(f_cached(*call[0], **call[1]) )
print('Timer to demonstrate caching:')
timer.display(printQ=True)
else:
results = [f_cached(*call[0], **call[1]) for call in calls]
result = self.aggregation(results)
return result
return wrapper
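# Compact, self-contained usage sketch (an illustrative addition; the original
# demonstrations live in the __main__ block below). The decorated function simply
# echoes the interval it receives, so the list returned by the wrapper shows how a
# wider request is split into already-cached and newly-queried sub-intervals.
def _demo_memoization_with_intervals():
    @MemoizationWithIntervals(None, ['interval'], aggregation=list)
    def fetch(interval=None):
        return interval
    yesterday, today = pdl2pd(pdl.yesterday('UTC')), pdl2pd(pdl.today('UTC'))
    two_days_ago = pdl2pd(pdl.yesterday('UTC').add(days=-1))
    first = fetch(interval=pd.Interval(yesterday, today))       # one real call, now cached
    wider = fetch(interval=pd.Interval(two_days_ago, today))    # only the missing piece is queried
    return first, wider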
if __name__ == "__main__":
import logging
import daiquiri
import pandas as pd
import time
daiquiri.setup(logging.DEBUG)
logging.getLogger('OneTick64').setLevel(logging.WARNING)
logging.getLogger('databnpp.ODCB').setLevel(logging.WARNING)
logging.getLogger('requests_kerberos').setLevel(logging.WARNING)
pd.set_option('display.max_rows', 200)
pd.set_option('display.width', 600)
pd.set_option('display.max_columns', 200)
tssixdaysago = pdl2pd(pdl.yesterday('UTC').add(days=-5))
tsfivedaysago = pdl2pd(pdl.yesterday('UTC').add(days=-4))
tsfourdaysago = pdl2pd(pdl.yesterday('UTC').add(days=-3))
tsthreedaysago = pdl2pd(pdl.yesterday('UTC').add(days=-2))
tstwodaysago = pdl2pd(pdl.yesterday('UTC').add(days=-1))
tsyesterday = pdl2pd(pdl.yesterday('UTC'))
tstoday = pdl2pd(pdl.today('UTC'))
tstomorrow = pdl2pd(pdl.tomorrow('UTC'))
tsintwodays = pdl2pd(pdl.tomorrow('UTC').add(days=1))
tsinthreedays = pdl2pd(pdl.tomorrow('UTC').add(days=2))
def print_calls(calls):
print( list( map( lambda i: (i.left, i.right), calls)))
def print_calls_dates(calls):
print( list( map( lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
def display_calls(calls):
loguru.logger.info( list( map( lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
# Testing record intervals -> ok
if True:
itvals = RecordIntervals()
calls = itvals(portion.closed(pdl.yesterday(), pdl.today()))
print(list(map( lambda i: (i.lower.to_date_string(), i.upper.to_date_string()), calls)))
print(list(map(lambda i: type(i), calls)))
calls = itvals( portion.closed(pdl.yesterday().add(days=-1), pdl.today().add(days=1)))
#print(calls)
print( list( map( lambda i: (i.lower.to_date_string(), i.upper.to_date_string()),
calls)))
# Testing record intervals pandas -> ok
if True:
itvals = RecordIntervalsPandas()
# yesterday -> today
calls = itvals(pd.Interval(pdl2pd(pdl.yesterday()), pdl2pd(pdl.today()), closed='left'))
print( list( map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls)))
# day before yesterday -> tomorrow: should yield 3 intervals
calls = itvals(pd.Interval(pdl2pd(pdl.yesterday().add(days=-1)), pdl2pd(pdl.today().add(days=1))))
print( list( map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls)))
# day before yesterday -> day after tomorrow: should yield 4 intervals
calls = itvals(
pd.Interval(pdl2pd(pdl.yesterday().add(days=-1)),
pdl2pd(pdl.tomorrow().add(days=1))))
print(
list(
map(
lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
# 2 days before yesterday -> 2day after tomorrow: should yield 6 intervals
calls = itvals(
pd.Interval(pdl2pd(pdl.yesterday().add(days=-2)),
pdl2pd(pdl.tomorrow().add(days=2))))
print(list(map( lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
# Further tests on record intervals pandas
if False:
itvals = RecordIntervalsPandas()
calls = itvals(pd.Interval(tstwodaysago, tstomorrow, closed='left'))
display_calls(calls)
calls = itvals( pd.Interval(tstwodaysago, tsyesterday))
display_calls(calls)
calls = itvals(
pd.Interval(tstwodaysago, tsintwodays))
display_calls(calls)
calls = itvals(
pd.Interval(pdl2pd(pdl.yesterday().add(days=-2)),
pdl2pd(pdl.tomorrow().add(days=2))))
display_calls(calls)
# proof-of_concept of decorator to modify function parameters
if False:
class dector_arg:
# a toy model
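# Pre-applies f_arg to the positional argument at index pos_arg and
# f_kwarg to the keyword argument named name_kwarg before calling
# the decorated function.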
def __init__(self,
pos_arg=None,
f_arg=None,
name_kwarg=None,
f_kwarg=None):
'''
:param pos_arg: the positional argument
:param f_arg: the function to apply to the positional argument
:param name_kwarg: the keyword argument
:param f_kwarg: the function to apply to the keyword argument
'''
self.args = {}
self.kwargs = {}
if pos_arg is not None:
self.args[pos_arg] = f_arg
print(self.args)
if name_kwarg is not None:
self.kwargs[name_kwarg] = f_kwarg
print(self.kwargs)
def __call__(self, f):
'''
the decorator action
:param f: the function to decorate
:return: a function whose selected arguments
have f_arg and f_kwarg pre-applied
before f is called
'''
self.f = f
def inner_func(*args, **kwargs):
print(f'function passed: {self.f}')
print(f'args passed: {args}')
print(f'kwargs passed: {kwargs}')
largs = list(args)
for i, f in self.args.items():
print(i)
print(args[i])
largs[i] = f(args[i])
for name, f in self.kwargs.items():
kwargs[name] = f(kwargs[name])
return self.f(*largs, **kwargs)
return inner_func
dec = dector_arg(pos_arg=0,
f_arg=lambda x: x + 1,
name_kwarg='z',
f_kwarg=lambda x: x + 1)
@dector_arg(1, lambda x: x + 1, 'z', lambda x: x + 1)
def g(x, y, z=3):
'''
The decorated function should add one to the second
positional argument and one to the keyword argument z.
:param x:
:param y:
:param z:
:return:
'''
print(f'x->{x}')
print(f'y->{y}')
print(f'z->{z}')
g(1, 10, z=100)
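# With the decorator above, g(1, 10, z=100) should report y->11 and z->101
# (both incremented by the lambdas) while x stays at 1.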
if False:
memo = MemoizationWithIntervals()
# testing MemoizationWithIntervals
# typical mechanism
if False:
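# Decorator arguments as used here: no positional interval parameters (None),
# the keyword argument 'interval' is the one split into sub-intervals,
# per-sub-interval results are aggregated with list, and memoization is
# delegated to a klepto LRU cache backed by an HDF5 archive.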
@MemoizationWithIntervals(
None, ['interval'],
aggregation=list,
debug=True,
memoization=klepto.lru_cache(
maxsize=200,
cache=klepto.archives.hdf_archive(
f'{pdl.today().to_date_string()}_memoisation.hdf5'),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
def function_with_interval_param(dummy1,dummy2, kdummy=1,
interval=pd.Interval(tstwodaysago, tstomorrow)):
time.sleep(1)
print('****')
print(f'dummy1: {dummy1}, dummy2: {dummy2}')
print(f'kdummy: {kdummy}')
print(f'interval: {interval}')
return [dummy1, dummy2, kdummy, interval]
print('=*=*=*=* MECHANISM DEMONSTRATION =*=*=*=*')
print('==== First pass ===')
print("initialisation with an interval from yesterday to today")
# function_with_interval_params(pd.Interval(pdl.yesterday(), pdl.today(),closed='left'),
# interval1 = pd.Interval(pdl.yesterday().add(days=0),
# pdl.today(), closed='both')
# )
print( f'Final result:\n{function_with_interval_param(0, 1, interval=pd.Interval(tsyesterday, tstoday))}')
print('==== Second pass ===')
print("request for data from the day before yesterday to today")
print("expected split in two intervals with results from yesterday to today being cached")
print(
f'Final result: {function_with_interval_param(0,1, interval=pd.Interval(tstwodaysago, tstoday))}'
)
print('==== 3rd pass ===')
print("request for data from three days ago to yesterday")
print("expected split in two intervals")
print(f'Final result:\n {function_with_interval_param(0, 1, interval=pd.Interval(tsthreedaysago, tsyesterday))}')
print('==== 4th pass ===')
print("request for data from three days ago to tomorrow")
print("expected split in three intervals")
print(f'Final result:\n\
{function_with_interval_param(0, 1, interval=pd.Interval(tsthreedaysago, tstomorrow))}')
print('==== 5th pass ===')
print("request for data from two days ago to today with different first argument")
print("No caching expected and one interval")
print( f'Final result:\n{function_with_interval_param(1, 1, interval=pd.Interval(tstwodaysago, tstoday))}' )
print('==== 6th pass ===')
print("request for data from three days ago to today with different first argument")
print("Two intervals expected")
print( f'Final result: {function_with_interval_param(1, 1, interval=pd.Interval(tsthreedaysago, tstoday))}' )
# Testing with an interval as position argument and one interval as keyword argument
if False:
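# Same mechanism, but the interval now appears both as positional argument 0
# and as the keyword argument 'interval1'.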
@MemoizationWithIntervals(
[0], ['interval1'],
aggregation=list,
debug=True,
memoization=klepto.lru_cache(
maxsize=200,
cache=klepto.archives.hdf_archive(
f'{pdl.today().to_date_string()}_memoisation.hdf5'),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
def function_with_interval_params(interval0,
interval1=pd.Interval(tstwodaysago, tstomorrow)):
time.sleep(1)
print('***')
print(f'interval0: {interval0}')
print(f'interval1: {interval1}')
return (interval0, interval1)
print('=*=*=*=* DEMONSTRATION WITH TWO INTERVAL PARAMETERS =*=*=*=*')
print('==== First pass ===')
print(f'Initialisation: first interval:\nyesterday to today - second interval: two days ago to tomorrow')
print(f'Final result:\n{function_with_interval_params(pd.Interval(tsyesterday, tstoday))}')
print('==== Second pass ===')
print(f'Call with first interval:\n3 days ago to today - second interval: unchanged')
print('Expected caching and split of first interval in two')
print( f'Final result: {function_with_interval_params(pd.Interval(tsthreedaysago, tstoday))}' )
print('==== 3rd pass ===')
print(f'Call with first interval:\nunchanged - second interval: yesterday to today')
print('Expected only cached results and previous split of first interval')
print(f'Final result:\n {function_with_interval_params(pd.Interval(tsthreedaysago, tstoday), interval1 = pd.Interval(tsyesterday, tstoday))}' )
print('==== 4th pass ===')
print(f'Call with first interval:\n3 days ago to today - second interval: yesterday to today')
print('Expected only cached results and only split of first interval')
print(f'Final result:\n {function_with_interval_params(pd.Interval(tsthreedaysago, tstoday), interval1 = pd.Interval(tsyesterday, tstoday))}' )
print('==== 5th pass ===')
print(f'Call with first interval:\n3 days ago to yesterday - second interval: 3 days ago to tomorrow')
print('Expected no split of first interval and split of second interval in two. Only one none-cached call')
print(f'Final result:\n\
{function_with_interval_params(pd.Interval(tsthreedaysago, tsyesterday), interval1= pd.Interval(tsthreedaysago, tstomorrow))}'
)
print('==== 6th pass ===')
print(f'Call with first interval:\n3 days ago to today - second interval: 3 days ago to tomorrow')
print('Expected split of first interval in two and split of second interval in two. One non-cached call: today - tomorrow')
print(f'Final result:\n\
{function_with_interval_params(pd.Interval(tsthreedaysago, tstoday), interval1=pd.Interval(tsthreedaysago, tstomorrow))}'
)
# Showing the issue with the current version
if False:
@MemoizationWithIntervals(None,
['interval'],
aggregation=list,
debug=True,
memoization=klepto.lru_cache(
maxsize=200,
keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
def function_with_interval_param(valint,
interval=pd.Interval(tstwodaysago, tstomorrow)):
time.sleep(1)
print('**********************************')
print(f'valint: {valint}')
print(f'interval: {interval}')
return (valint, interval)
print('==== First pass ===')
print( f'Final result:\n{function_with_interval_param(2, interval=pd.Interval(tsyesterday, tstoday))}')
print('==== Second pass ===')
print(f'Final result: {function_with_interval_param(2, interval=pd.Interval(tsthreedaysago, tstoday))}')
print('==== 3rd pass ===')
print( f'Final result:\n {function_with_interval_param(3, interval=pd.Interval(tsthreedaysago, tstoday))}')
print('==== 4th pass ===')
print(f'Final result:\n{function_with_interval_param(3, interval=pd.Interval(tsthreedaysago, tstomorrow))}')
# testing getting back the memoized function from MemoizationWithIntervals
if False:
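# Passing get_function_cachedQ=True returns the underlying memoized function,
# whose klepto cache can then be inspected (__cache__(), info()) and dumped
# to the file archive.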
@MemoizationWithIntervals(
[0], ['interval1'],
aggregation=list,
debug=True,
memoization=klepto.lru_cache(
maxsize=200,
cache=klepto.archives.file_archive(
f'{pdl.today().to_date_string()}_memoisation.pkl'),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
def function_with_interval_params(interval0,
interval1=pd.Interval(
tstwodaysago,
tstomorrow)):
time.sleep(1)
print('**********************************')
print(f'interval0: {interval0}')
print(f'interval1: {interval1}')
return (interval0, interval1)
print('==== First pass ===')
# function_with_interval_params(pd.Interval(pdl.yesterday(), pdl.today(),closed='left'),
# interval1 = pd.Interval(pdl.yesterday().add(days=0),
# pdl.today(), closed='both')
# )
f_mzed = function_with_interval_params(get_function_cachedQ=True)
print(
f'Final result:\n{function_with_interval_params(pd.Interval(tsyesterday, tstoday))}'
)
print(f'==============\nf_memoized live cache: {f_mzed.__cache__()}')
print(f'f_memoized live cache type: {type(f_mzed.__cache__())}')
print(f'f_memoized file cache: {f_mzed.__cache__().archive}')
print(f'f_memoized live cache: {f_mzed.info()}')
f_mzed.__cache__().dump()
print(f'f_memoized file cache: {f_mzed.__cache__().archive}')
# print('==== Second pass ===')
# print(f'Final result: {function_with_interval_params(pd.Interval(pdl.yesterday().add(days=-2), pdl.today()))}')
# print('==== 3rd pass ===')
# print(f'Final result:\n\
# {function_with_interval_params(pd.Interval(pdl.yesterday().add(days=-2), pdl.yesterday()), interval1 = pd.Interval(pdl.yesterday().add(days=0), pdl.today()))}')
# print('==== 4th pass ===')
# print(f'Final result:\n\
# {function_with_interval_params(pd.Interval(pdl.yesterday().add(days=-2), pdl.yesterday()), interval1= pd.Interval(pdl.yesterday().add(days=-2), pdl.tomorrow()))}')
# testing serialization with HDF5 memoized function from MemoizationWithIntervals
if False:
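# Same as above but with a klepto hdf_archive (serialized=True, cached=False),
# so results should be written directly to the HDF5 file rather than kept in a
# separate in-memory cache before dumping.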
@MemoizationWithIntervals(
[0], ['interval1'],
aggregation=list,
debug=True,
memoization=klepto.lru_cache(
maxsize=200,
cache=klepto.archives.hdf_archive(
f'{pdl.today().to_date_string()}_memoisation.hdf5',
serialized=True,
cached=False,
meta=False),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
def function_with_interval_params(interval0,
interval1=pd.Interval(
tstwodaysago,
tstomorrow)):
time.sleep(1)
print('*********** function called *******************')
print(f'interval0: {interval0}')
print(f'interval1: {interval1}')
return (interval0, interval1)
print('==== First pass ===')
# function_with_interval_params(pd.Interval(pdl.yesterday(), pdl.today(),closed='left'),
# interval1 = pd.Interval(pdl.yesterday().add(days=0),
# pdl.today(), closed='both')
# )
f_mzed = function_with_interval_params(get_function_cachedQ=True)
print(
f'Final result:\n{function_with_interval_params(pd.Interval(tsyesterday, tstoday))}'
)
print(f'==============\nf_memoized live cache: {f_mzed.__cache__()}')
print(f'f_memoized live cache type: {type(f_mzed.__cache__())}')
print(f'f_memoized file cache: {f_mzed.__cache__().archive}')
print(f'f_memoized live cache: {f_mzed.info()}')
f_mzed.__cache__().dump()
print(f'f_memoized file cache: {f_mzed.__cache__().archive}')
if False:
@MemoizationWithIntervals([0], aggregation=list, debug=False)
def function_with_interval_params(interval0):
time.sleep(1)
print('**********************************')
print(f'interval0: {interval0}')
return (interval0)
print('==== First pass ===')
print(
f'Final result: {function_with_interval_params(pd.Interval(tsyesterday, tstoday))}'
)
print('==== Second pass ===')
print(
f'Final result: {function_with_interval_params(pd.Interval(tsthreedaysago, tstoday) )}'
)
print('==== 3rd pass ===')
print(
f'Final result: {function_with_interval_params(pd.Interval(tsthreedaysago, tsyesterday))}'
)
print('==== 4th pass ===')
print(
f'Final result: {function_with_interval_params(pd.Interval(tsthreedaysago, tstomorrow))}'
)
# Testing kwargs only
if False:
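# Keyword-only variant: no positional interval arguments, only the keyword
# argument 'period' is treated as an interval.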
@MemoizationWithIntervals([], ['period'],
aggregation=list,
debug=False)
def function_with_interval_params(array=['USD/JPY'],
period=pd.Interval( tsyesterday, pd.Timestamp.now('UTC'))):
time.sleep(1)
print('************* function called *********************')
print(f'interval0: {period}')
return (array, period)
print('==== First pass ===')
print(
f'Final result: {function_with_interval_params(array=["USD/JPY"], period = pd.Interval(tsyesterday, pd.Timestamp.now(tz="UTC")))}'
)
print('==== Second pass ===')
print(
f'Final result: {function_with_interval_params(array=["USD/JPY"],period = pd.Interval(tsyesterday, pd.Timestamp.now(tz="UTC")) )}'
)
print('==== 3rd pass ===')
print(
f'Final result: {function_with_interval_params(array=["USD/JPY"],period = pd.Interval(tsyesterday, pd.Timestamp.now(tz="UTC")))}'
)
# Testing tolerance
if False:
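# Tolerance demo: 'rounding' is set to a five-second step, so interval
# endpoints that differ by less than that should be treated as identical
# and hit the cache (compare the passes before and after the 6 s sleep).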
timenow = pdl.now()
timenowplus5s = timenow.add(seconds=5)
fiveseconds = timenowplus5s - timenow
@MemoizationWithIntervals([], ['period'],
aggregation=list,
debug=False,
rounding=fiveseconds)
def function_with_interval_params(array=['USD/JPY'],
period=pd.Interval(tsyesterday, pd.Timestamp.now(tz="UTC"))
):
time.sleep(1)
print('************* function called *********************')
print(f'interval0: {period}')
return (period)
print('==== First pass ===')
print(
f'Final result: {function_with_interval_params(array=["USD/JPY"], period=pd.Interval(tstoday, pd.Timestamp.now(tz="UTC")))}'
)
print('==== Second pass ===')
time.sleep(1)
print(
f'Final result: {function_with_interval_params(["USD/JPY"], period=pd.Interval(tstoday, pd.Timestamp.now(tz="UTC")))}'
)
time.sleep(6)
print('==== 3rd pass ===')
print(
f'Final result: {function_with_interval_params(["USD/JPY"], period=pd.Interval(tstoday, pd.Timestamp.now(tz="UTC")))}'
)
print('==== 4th pass ===')
print(
f'Final result: {function_with_interval_params(["USD/JPY"], period = pd.Interval(tstoday, pd.Timestamp.now(tz="UTC")))}'
)
if False:
itvals = RecordIntervalsPandas()
# pd.Interval needs pandas Timestamp endpoints, so convert the pendulum dates
calls = itvals(pd.Interval(pdl2pd(pdl.yesterday()), pdl2pd(pdl.today())))
print_calls_dates(calls)
calls = itvals(pd.Interval(pdl2pd(pdl.yesterday().add(days=-2)), pdl2pd(pdl.today())))
print_calls_dates(calls)
calls = itvals(pd.Interval(pdl2pd(pdl.yesterday().add(days=-2)), pdl2pd(pdl.yesterday())))
print_calls_dates(calls)
calls = itvals(pd.Interval(pdl2pd(pdl.yesterday().add(days=-4)), pdl2pd(pdl.tomorrow())))
print_calls_dates(calls)
if False:
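# Unrelated coding-puzzle snippet: solution(data, n) keeps the elements of
# data whose total number of occurrences is at most n.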
def solution(data, n):
counts = {x: data.count(x) for x in data}
return [x for x in data if counts[x] <= n]
print(solution([1, 2, 3], 0))
print(solution([1, 2, 2, 3, 3, 4, 5, 5], 1))
if False:
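# Toy comprehension replacing the positional arguments listed in cont with
# their candidate values to build alternative argument lists from argsorg.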
cont = {0: [0, 1], 2: [3, 4]}
argsorg = [5, 6, 7]
calls_args = [[
arg if i not in cont.keys() else cont[i][j]
for i, arg in enumerate(argsorg)
] for p in cont.keys() for j in range(len(cont[p]))]
if True:
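# Compare the three recorder configurations: no sub-intervals required,
# sub-intervals required without minimal splitting, and sub-intervals
# required with minimal splitting (subinterval_minQ=True).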
print("Testing subintervals and strategies")
print("1. No sub")
itvals_nosub = RecordIntervalsPandas(subintervals_requiredQ=False, subinterval_minQ=False)
print("-6->-1")
calls = itvals_nosub(pd.Interval(-6, -1))
print("-3->0")
calls = itvals_nosub(pd.Interval(-3, 0 ))
print_calls(calls)
print("2. No sub first strategy")
itvals_sub = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=False)
print("-6->-1")
calls = itvals_sub(pd.Interval(-6,-1))
print("-3->0")
calls = itvals_sub(pd.Interval(-3,0))
print_calls(calls)
print("3. Sub second strategy")
itvals_sub2 = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=True)
print("-6->-1")
calls = itvals_sub2(pd.Interval(-6,-1))
print("-3->0")
calls = itvals_sub2(pd.Interval(-3,0))
print_calls(calls)
# Test ok
if False:
print("Testing subintervals and strategies")
print("1. No sub")
itvals_nosub = RecordIntervalsPandas(subintervals_requiredQ=False, subinterval_minQ=False)
print("-6->-1")
calls = itvals_nosub(pd.Interval(tssixdaysago, tsyesterday))
print("-3->0")
calls = itvals_nosub(pd.Interval(tsthreedaysago, tstoday ))
print_calls(calls)
print("2. No sub first strategy")
itvals_sub = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=False)
print("-6->-1")
calls = itvals_sub(pd.Interval(tssixdaysago, tsyesterday))
print("-3->0")
calls = itvals_sub(pd.Interval(tsthreedaysago, tstoday ))
print_calls(calls)
print("3. Sub second strategy")
itvals_sub2 = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=True)
print("-6->-1")
calls = itvals_sub2(pd.Interval(tssixdaysago, tsyesterday))
print("-3->0")
calls = itvals_sub2(pd.Interval(tsthreedaysago, tstoday))
print_calls(calls)
if False:
print("Testing subinterval and first strategy")
itvals = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=False)
calls = itvals(pd.Interval(tsfourdaysago, tsthreedaysago))
print(list(map(lambda i: (i.left, i.right), calls)))
calls = itvals(pd.Interval(tstwodaysago, tstoday))
print(list(map(lambda i: (i.left, i.right), calls)))
calls = itvals(pd.Interval(tssixdaysago, tsyesterday))
print(list(map(lambda i: (i.left, i.right), calls)))
calls = itvals(pd.Interval(tssixdaysago, tsyesterday))
print("should be broken in 3 intervals: -5->-4 | -4->-3 | -3->-1")
print(sorted(list(map(lambda i: (i.left, i.right), calls))))
if False:
print("Testing subinterval and second strategy")
itvals = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=True)
calls = itvals(pd.Interval(tsfourdaysago, tsthreedaysago))
print(list(map(lambda i: (i.left, i.right), calls)))
calls = itvals(pd.Interval(tstwodaysago, tstoday))
print(list(map(lambda i: (i.left, i.right), calls)))
calls = itvals(pd.Interval(tssixdaysago, tsyesterday))
print(sorted(list(map(lambda i: (i.left, i.right), calls))))
calls = itvals(pd.Interval(tssixdaysago, tsyesterday))
print("should be broken in 3 intervals: -5->-4 | -4->-3 | -3->-1")
print(sorted(list(map(lambda i: (i.left, i.right), calls))))
if False:
print("Testing subinterval and first strategy")
itvals = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=False)
calls = itvals(pd.Interval(-2, 0))
print_calls(calls)
calls = itvals(pd.Interval(-4, -3))
print_calls(calls)
calls = itvals(pd.Interval(-6, 1))
print("should be broken in 3 intervals: -6->-4 | -4->-3 | -3->-2 | -2->0 | 0->1")
print_calls(calls)
if False:
print("Testing subinterval and second strategy")
itvals = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=True)
calls = itvals(pd.Interval(-2, 0))