prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
---|---|---|
# Copyright 2018, <NAME>, <NAME>
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# filter future warnings
import warnings
warnings.simplefilter("ignore", category=FutureWarning)
from epee import *
import numpy as np
import pandas as pd
import argparse
import logging
import time
import os
import itertools
import multiprocessing
from time import localtime, strftime
# set tensorflow verbosity
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
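# (level '3' silences TensorFlow's C++ INFO, WARNING and ERROR messages, leaving only FATAL)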
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--conditiona", help="RNA-seq data for Condition A",
type=str, required=True)
parser.add_argument("-b", "--conditionb", help="RNA-seq data for Condition B",
type=str, required=True)
parser.add_argument("-na", "--networka", help="Network for condition A",
type=str, required=True)
parser.add_argument("-nb", "--networkb", help="Network for condition B",
type=str, required=True)
# DEFAULTS
parser.add_argument("-o", "--output", help="output directory", type=str,
default='')
parser.add_argument("-reg1", "--lregularization", help="lasso regularization \
parameter", type=float, default=0.01)
parser.add_argument("-reg2", "--gregularization", help="graph contrained \
regularization parameter", type=float, default=0.01)
parser.add_argument("-s", "--step", help="optimizer learning-rate",
type=float, default=0.0001)
parser.add_argument("-c", "--conditioning", help="Weight for the interactions \
not known", type=bool, default=True)
parser.add_argument("-r", "--runs", help="Number of independent runs", type=int,
default=20)
parser.add_argument("-i", "--iterations", help="Number of iterations",
type=int, default=100000)
parser.add_argument("-ag", "--aggregation", help="""
Method for aggregating runs. Default: "sum"
Valid options: {"mean", "median", "sum"} """,
type=str, default='sum')
parser.add_argument("-n", "--normalize", help="""
Weight normalization strategy. Default:"minmax"
Valid options: {"minmax", "log", "log10", "no"} """,
type=str, default='minmax')
parser.add_argument("-m", "--model", help="""
Model regularization choice. Default: "epee-gcl"
Valid options: {"epee-gcl","epee-l","no-penalty" """,
type=str, default='epee-gcl')
parser.add_argument("-v", "--verbose",
help="logging info levels 10, 20, or 30",
type=int, default=10)
# OPTIONAL SETTINGS
parser.add_argument("-eval", "--evaluate",
help="Evaluation mode available for Th1, Th2, Th17, \
Bmem, COAD, and AML",
type=str, default=None)
parser.add_argument("-pr", "--prefix",
help="Add prefix to the log",
type=str, default=strftime('%Y%m%d'))
# OPTIONAL FLAGS
parser.add_argument("-w", "--store_weights",
help="Store all the inferred weights",
action='store_true')
parser.add_argument("-mp", "--multiprocess",
help="multiprocess the calculation of perturb and \
regulator scores", action='store_true')
# NULL FLAG
parser.add_argument("-null", "--null",
help="Generate null scores by label permutation",
action='store_true')
# NULL SETTINGS
parser.add_argument("-d", "--seed", help="Starting seed number",
type=int, default=0)
parser.add_argument("-p", "--perturb", help="True label perturb scores. Required when running permutations for null model",
type=str, default=None)
parser.add_argument("-sg", "--shuffle_genes",
help="Generate null scores by gene permutation",
action='store_true')
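# Illustrative invocation (sketch; file names are placeholders, and the script is
# assumed to call `args = parser.parse_args()` further down, since `args` is used below):
#   python epee_script.py -a condA_expr.txt -b condB_expr.txt \
#       -na condA_network.txt -nb condB_network.txt -o results -m epee-gcl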
def get_scores(sel):
"""To get perturb and regulator score"""
y1, w1, w1_df, y2, w2, w2_df, count = sel
# Calculate perturb scores
genescore_runi = get_perturb_scores(Y1, y1, X1, w1,
Y2, y2, X2, w2, S1, S2)
genescore_runi.columns = ['gene', 'set{}'.format(count)]
if args.null:
regscore_runi, diff_regs = get_diff_regulatory_activity(
actual_perturb['gene'][:1000],
w1_df, w2_df, top_regs=20)
else:
regscore_runi, diff_regs = get_diff_regulatory_activity(
genescore_runi['gene'][:1000],
w1_df, w2_df, top_regs=20)
regscore_runi.columns = ['gene', 'set{}'.format(count)]
return (genescore_runi, regscore_runi)
def run_epee():
"""To run EPEE with specified inputs."""
logging.info('SAMPLES: Y1: {} | Y2: {}'.format(Y1.shape[1], Y2.shape[1]))
logging.info('Tensorflow: {}'.format(tf.__version__))
logging.info('GENES: {}'.format(Y1.shape[0]))
logging.info('TFs: {}'.format(S1.shape[1]))
logging.info('MODEL LEARNING STARTED')
genescore_df = pd.DataFrame()
regscore_df = pd.DataFrame()
loss_runs = []
y1_s = []
y2_s = []
w1_s = []
w2_s = []
w1S1_s = []
w2S2_s = []
for rid in range(args.runs):
start = time.time()
logging.debug('Tensorflow: {}'.format(tf.__version__))
logging.debug('MODEL: {} learning Y1'.format(rid))
y1, w1, loss_arr1 = run_model(np.array(Y1), np.array(X1),
np.array(S1),
l_reg=args.lregularization,
g_reg=args.gregularization,
step=args.step,
itr=args.iterations,
log_itr=round(args.iterations/20),
seed=rid+args.seed,
model=args.model,
val=condition_val)
logging.debug('MODEL: {} learning Y2'.format(rid))
y2, w2, loss_arr2 = run_model(np.array(Y2), np.array(X2),
np.array(S2),
l_reg=args.lregularization,
g_reg=args.gregularization,
step=args.step,
itr=args.iterations,
log_itr=round(args.iterations/20),
seed=rid+args.seed,
model=args.model,
val=condition_val)
loss_runs.append((rid, loss_arr1[-1], loss_arr2[-1]))
# Calculate w1S1 and w2S2
w1_s1 = np.multiply(w1, S1)
w2_s2 = np.multiply(w2, S2)
w1_df = get_weights_df(w1_s1, Y1.index, X1.index)
w2_df = get_weights_df(w2_s2, Y2.index, X2.index)
w1o_df = get_weights_df(w1, Y1.index, X1.index)
w2o_df = get_weights_df(w2, Y2.index, X2.index)
# Store dataframes
y1_s.append(y1)
y2_s.append(y2)
w1_s.append(w1)
w2_s.append(w2)
w1S1_s.append(w1_df)
w2S2_s.append(w2_df)
# Output inferred weights if args.store_weights is True and args.null is False
if args.store_weights and not args.null:
w1o_df.to_csv('{}/model/w1_{}.txt'.format(outdir, rid),
sep='\t')
w2o_df.to_csv('{}/model/w2_{}.txt'.format(outdir, rid),
sep='\t')
if rid == 0:
S1.to_csv('{}/model/S1_input.txt'.format(outdir),
sep='\t')
S2.to_csv('{}/model/S2_input.txt'.format(outdir),
sep='\t')
X1.to_csv('{}/model/X1_input.txt'.format(outdir),
sep='\t')
X2.to_csv('{}/model/X2_input.txt'.format(outdir),
sep='\t')
Y1.to_csv('{}/model/Y1_input.txt'.format(outdir),
sep='\t')
Y2.to_csv('{}/model/Y2_input.txt'.format(outdir),
sep='\t')
end = time.time()
logging.info('MODEL: {} RUNTIME: {} mins'.format(rid,
round((end-start)/60, 2)))
# For each pair of inferred weights, calculate perturb and regulator scores
# logging.info('CALCULATE PERTURB AND REGULATOR SCORES')
logging.info('SCORES: pairwise comparison of all Y1 and Y2 models')
list_runs = list(range(args.runs))
pairs = list(itertools.product(list_runs, list_runs))
score_inputs = []
for count, p in enumerate(pairs):
m1, m2 = p
score_inputs.append((y1_s[m1], w1_s[m1], w1S1_s[m1],
y2_s[m2], w2_s[m2], w2S2_s[m2],
count))
if args.multiprocess:
cpu_count = multiprocessing.cpu_count()
p = multiprocessing.Pool(int(cpu_count/2))
out = p.map(get_scores, score_inputs)
else:
out = []
for i in score_inputs:
i_out = get_scores(i)
out.append(i_out)
for count, scores in enumerate(out):
genescore_runi, regscore_runi = scores
if count == 0:
genescore_df = genescore_runi.copy()
regscore_df = regscore_runi.copy()
else:
# if np.all(genescore_runi.index == genescore_df.index):
# genescore_df[genescore_runi.columns[1]] = genescore_runi.iloc[:, 1]
# else:
genescore_df =
|
pd.merge(genescore_df, genescore_runi, on='gene')
|
pandas.merge
|
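# Hedged sketch (not part of the dataset record above): pandas.merge joins two
# score frames on their shared 'gene' column, mirroring the completion shown.
import pandas as pd
a = pd.DataFrame({'gene': ['TP53', 'MYC'], 'set0': [0.9, 0.1]})
b = pd.DataFrame({'gene': ['TP53', 'MYC'], 'set1': [0.8, 0.2]})
merged = pd.merge(a, b, on='gene')  # columns: gene, set0, set1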
import pandas as pd
LOAD_CAPIQ_CAT_A_INDEX_str = ["2014-12-31 00:00:00", "2015-12-31 00:00:00", "2016-12-31 00:00:00", "2017-12-31 00:00:00", "2018-12-31 00:00:00"]
LOAD_CAPIQ_CAT_A_INDEX = [
|
pd.to_datetime(val)
|
pandas.to_datetime
|
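# Hedged sketch (not part of the dataset record above): pandas.to_datetime parses
# each timestamp string into a pandas Timestamp, so a list comprehension over the
# strings above yields proper datetimes.
import pandas as pd
parsed = [pd.to_datetime(s) for s in ["2014-12-31 00:00:00", "2015-12-31 00:00:00"]]
# parsed[0] == pd.Timestamp('2014-12-31 00:00:00')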
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data =
|
pd.read_csv(path)
|
pandas.read_csv
|
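# Hedged sketch (not part of the dataset record above): pandas.read_csv loads a
# delimited text file into a DataFrame; `path` above is assumed to point at a CSV.
import io
import pandas as pd
csv_text = "city,population\nParis,2148000\nLyon,513000"
df_demo = pd.read_csv(io.StringIO(csv_text))  # same call as pd.read_csv(path) on a file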
# Title: Data cleaning for improved NEWS2 paper
# Author: <NAME>
# Started: 2020-07-14
import os
from joblib import dump
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
verbose = False
raw = {}
for r in ['bloods', 'outcomes', 'vitals']:
raw[r] = pd.read_csv(os.path.join('data', 'raw', '2020-06-30 Updated 1500',
'1593424203.7842345_AST', r + '.csv'),
low_memory=False)
# Outcomes --------------------------------------------------------------------
oc = raw['outcomes'].rename(columns={'patient_pseudo_id': 'pid',
'Sx Date': 'symp_onset',
'Primary Endpoint': 'primary_end',
'Death Date': 'death_date',
'ITU date': 'itu_date',
'Admit Date': 'admit_date'})
oc.columns = [x.lower() for x in oc.columns]
# Derive BAME
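# np.select evaluates the conditions in order and returns, element-wise, the choice
# for the first condition that is True; the trailing `True` acts as the default
# branch (here pd.NA for ethnicities that are neither Black/Asian nor Caucasian).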
oc['bame'] = np.select([oc['ethnicity'].isin(['Black', 'Asian']),
oc['ethnicity'] == 'Caucasian',
True],
[True, False, pd.NA])
# Derive outcomes for paper ---------------------------------------------------
for i in ['symp_onset', 'death_date', 'itu_date', 'admit_date']:
oc[i] = pd.to_datetime(oc[i])
# Set index date
# If nosocomial, use symptom onset; otherwise use admission date.
oc['nosoc'] = oc['symp_onset'] > oc['admit_date']
oc['index'] = np.where(oc['nosoc'], oc['symp_onset'], oc['admit_date'])
# Define endpoints
oc['end14'] = oc['index'] + pd.DateOffset(days=14)
oc['end3'] = oc['index'] + pd.DateOffset(hours=72)
# Check patients who died/ICU before symptom onset
oc['y_before_onset'] = ((oc['death_date'] < oc['symp_onset']) |
(oc['itu_date'] < oc['symp_onset']))
# Check patients who died/ICU before admission
oc['y_before_admit'] = ((oc['death_date'] < oc['admit_date']) |
(oc['itu_date'] < oc['admit_date']))
# Remove patients who died before admission
oc = oc[~oc['y_before_admit']]
# Define 14-day outcome
latest_extract = pd.to_datetime('2020-05-18')
oc['event14'] = np.select([oc['death_date'] <= oc['end14'],
oc['itu_date'] <= oc['end14'],
oc['end14'] <= latest_extract,
True],
['death', 'itu', 'other', pd.NA])
oc['y14'] = pd.NA
oc.loc[(oc['event14'] == 'death') | (oc['event14'] == 'itu'), 'y14'] = 1
oc.loc[oc['event14'] == 'other', 'y14'] = 0
# Define 3-day outcome
oc['event3'] = np.select([oc['death_date'] <= oc['end3'],
oc['itu_date'] <= oc['end3'],
oc['end3'] <= latest_extract,
True],
['death', 'itu', 'other', pd.NA])
oc['y3'] = pd.NA
oc.loc[(oc['event3'] == 'death') | (oc['event3'] == 'itu'), 'y3'] = 1
oc.loc[oc['event3'] == 'other', 'y3'] = 0
# Define survival outcomes ----------------------------------------------------
# Days until death
oc['td_days'] = (oc['death_date'] - oc['index']).dt.days
oc['td_cens'] = (oc['td_days'].isna()) | (oc['td_days'] > 14)
oc['td_days'] = np.select([oc['td_days'].isna(),
oc['td_days'] > 14,
True],
[14, 14, oc['td_days']])
# Days until ICU
oc['ti_days'] = (oc['itu_date'] - oc['index']).dt.days
oc['ti_cens'] = (oc['ti_days'].isna()) | (oc['ti_days'] > 14)
oc['ti_days'] = np.select([oc['ti_days'].isna(),
oc['ti_days'] > 14,
True],
[14, 14, oc['ti_days']])
# Days until death OR ICU
oc['either_date'] = oc[['itu_date', 'death_date']].min(axis=1)
oc['te_days'] = (oc['either_date'] - oc['index']).dt.days
oc['te_cens'] = (oc['te_days'].isna()) | (oc['te_days'] > 14)
oc['te_days'] = np.select([oc['te_days'].isna(),
oc['te_days'] > 14,
True],
[14, 14, oc['te_days']])
# Check that all patients have passed their 14-day endpoint
print(all((oc['end14'] < latest_extract)))
# Define 'number of comorbidities'
numcom = oc[['copd', 'asthma', 'hf',
'diabetes', 'ihd', 'ckd', 'htn']].sum(axis=1)
numcom[numcom > 4] = 4
oc['numcom'] = numcom
# Vitals ----------------------------------------------------------------------
vt = raw['vitals']
vt['ut'] = pd.to_datetime(vt['RECORDED DATE'])
# Derive GCS score
gcs = vt.loc[:, vt.columns.str.startswith('GCS ')].copy()
for v in gcs:
gcs[v] = gcs[v].str.extract(r'(\d+)', expand=False).astype(float)
vt['gcs_score'] = gcs.sum(skipna=False, axis=1)
# Create oxygen measures
vt['oxlt'] = vt['Oxygen Litres']
vt['suppox'] = np.select([vt['Supplemental Oxygen'] == 'No (Air)',
vt['Supplemental Oxygen'] == 'Yes',
True],
[False, True, pd.NA])
vt.loc[vt['Supplemental Oxygen'] == 'No (Air)', 'oxlt'] = 0
vt['oxord'] = np.select([vt['oxlt'] == 0,
vt['oxlt'] <= 0.5,
vt['oxlt'] <= 1,
vt['oxlt'] <= 2,
vt['oxlt'] <= 3,
vt['oxlt'] <= 5,
vt['oxlt'] <= 10,
True],
[0, 1, 2, 3, 4, 5, 6, 7])
# Select required measures
vt = vt.rename(columns={'patient_pseudo_id': 'pid',
'Temperature': 'temp',
'Oxygen Saturation': 'oxsat',
'Respiration Rate': 'resp',
'Heart Rate': 'hr',
'Systolic BP': 'sbp',
'Diastolic BP': 'dbp',
'NEWS2 score': 'news2'})
keep = ['pid', 'temp', 'oxsat', 'resp', 'hr', 'sbp', 'dbp', 'news2', 'oxlt',
'suppox', 'oxord', 'gcs_score']
vt = vt[['ut'] + keep]
# Pick first non-missing value following hospital admission and symptom onset
vt = vt.merge(oc, how='inner', on='pid')
vt['latest_measure'] = vt['index'] +
|
pd.DateOffset(hours=48)
|
pandas.DateOffset
|
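# Hedged sketch (not part of the dataset record above): pandas.DateOffset shifts a
# datetime by a calendar-aware amount, so adding DateOffset(hours=48) to the index
# date gives a cutoff 48 hours later, as in the completion shown.
import pandas as pd
t0 = pd.to_datetime("2020-03-01 09:00")
t_cutoff = t0 + pd.DateOffset(hours=48)  # Timestamp('2020-03-03 09:00:00')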
# -*- coding: utf-8 -*-
# @author: Elie
#%% ==========================================================
# Import libraries set library params
# ============================================================
import pandas as pd
import numpy as np
import os
pd.options.mode.chained_assignment = None #Pandas warnings off
#plotting
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.ticker import Locator
import matplotlib as mpl
# stats
from scipy import stats
#set matplotlib rcparams
mpl.rcParams['savefig.transparent'] = "False"
mpl.rcParams['axes.facecolor'] = "white"
mpl.rcParams['figure.facecolor'] = "white"
mpl.rcParams['font.size'] = "5"
plt.rcParams['savefig.transparent'] = "False"
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['figure.facecolor'] = "white"
plt.rcParams['font.size'] = "5"
#%% ==========================================================
# define these feature/headers here in case the headers
# are out of order in input files (often the case)
# ============================================================
snv_categories = ["sample",
"A[C>A]A", "A[C>A]C", "A[C>A]G", "A[C>A]T",
"C[C>A]A", "C[C>A]C", "C[C>A]G", "C[C>A]T",
"G[C>A]A", "G[C>A]C", "G[C>A]G", "G[C>A]T",
"T[C>A]A", "T[C>A]C", "T[C>A]G", "T[C>A]T",
"A[C>G]A", "A[C>G]C", "A[C>G]G", "A[C>G]T",
"C[C>G]A", "C[C>G]C", "C[C>G]G", "C[C>G]T",
"G[C>G]A", "G[C>G]C", "G[C>G]G", "G[C>G]T",
"T[C>G]A", "T[C>G]C", "T[C>G]G", "T[C>G]T",
"A[C>T]A", "A[C>T]C", "A[C>T]G", "A[C>T]T",
"C[C>T]A", "C[C>T]C", "C[C>T]G", "C[C>T]T",
"G[C>T]A", "G[C>T]C", "G[C>T]G", "G[C>T]T",
"T[C>T]A", "T[C>T]C", "T[C>T]G", "T[C>T]T",
"A[T>A]A", "A[T>A]C", "A[T>A]G", "A[T>A]T",
"C[T>A]A", "C[T>A]C", "C[T>A]G", "C[T>A]T",
"G[T>A]A", "G[T>A]C", "G[T>A]G", "G[T>A]T",
"T[T>A]A", "T[T>A]C", "T[T>A]G", "T[T>A]T",
"A[T>C]A", "A[T>C]C", "A[T>C]G", "A[T>C]T",
"C[T>C]A", "C[T>C]C", "C[T>C]G", "C[T>C]T",
"G[T>C]A", "G[T>C]C", "G[T>C]G", "G[T>C]T",
"T[T>C]A", "T[T>C]C", "T[T>C]G", "T[T>C]T",
"A[T>G]A", "A[T>G]C", "A[T>G]G", "A[T>G]T",
"C[T>G]A", "C[T>G]C", "C[T>G]G", "C[T>G]T",
"G[T>G]A", "G[T>G]C", "G[T>G]G", "G[T>G]T",
"T[T>G]A", "T[T>G]C", "T[T>G]G", "T[T>G]T"]
indel_categories = ["sample",
"1:Del:C:0", "1:Del:C:1", "1:Del:C:2", "1:Del:C:3", "1:Del:C:4", "1:Del:C:5",
"1:Del:T:0", "1:Del:T:1", "1:Del:T:2", "1:Del:T:3", "1:Del:T:4", "1:Del:T:5",
"1:Ins:C:0", "1:Ins:C:1", "1:Ins:C:2", "1:Ins:C:3", "1:Ins:C:4", "1:Ins:C:5",
"1:Ins:T:0", "1:Ins:T:1", "1:Ins:T:2", "1:Ins:T:3", "1:Ins:T:4", "1:Ins:T:5",
"2:Del:R:0", "2:Del:R:1", "2:Del:R:2", "2:Del:R:3", "2:Del:R:4", "2:Del:R:5",
"3:Del:R:0", "3:Del:R:1", "3:Del:R:2", "3:Del:R:3", "3:Del:R:4", "3:Del:R:5",
"4:Del:R:0", "4:Del:R:1", "4:Del:R:2", "4:Del:R:3", "4:Del:R:4", "4:Del:R:5",
"5:Del:R:0", "5:Del:R:1", "5:Del:R:2", "5:Del:R:3", "5:Del:R:4", "5:Del:R:5",
"2:Ins:R:0", "2:Ins:R:1", "2:Ins:R:2", "2:Ins:R:3", "2:Ins:R:4", "2:Ins:R:5",
"3:Ins:R:0", "3:Ins:R:1", "3:Ins:R:2", "3:Ins:R:3", "3:Ins:R:4", "3:Ins:R:5",
"4:Ins:R:0", "4:Ins:R:1", "4:Ins:R:2", "4:Ins:R:3", "4:Ins:R:4", "4:Ins:R:5",
"5:Ins:R:0", "5:Ins:R:1", "5:Ins:R:2", "5:Ins:R:3", "5:Ins:R:4", "5:Ins:R:5",
"2:Del:M:1", "3:Del:M:1", "3:Del:M:2", "4:Del:M:1", "4:Del:M:2", "4:Del:M:3",
"5:Del:M:1", "5:Del:M:2", "5:Del:M:3", "5:Del:M:4", "5:Del:M:5"]
cnv_categories = ["sample",
"BCper10mb_0", "BCper10mb_1", "BCper10mb_2", "BCper10mb_3",
"CN_0", "CN_1", "CN_2", "CN_3", "CN_4", "CN_5", "CN_6", "CN_7", "CN_8",
"CNCP_0", "CNCP_1", "CNCP_2", "CNCP_3", "CNCP_4", "CNCP_5", "CNCP_6", "CNCP_7",
"BCperCA_0", "BCperCA_1", "BCperCA_2", "BCperCA_3", "BCperCA_4", "BCperCA_5",
"SegSize_0", "SegSize_1", "SegSize_2", "SegSize_3", "SegSize_4", "SegSize_5",
"SegSize_6", "SegSize_7", "SegSize_8", "SegSize_9", "SegSize_10",
"CopyFraction_0", "CopyFraction_1", "CopyFraction_2", "CopyFraction_3", "CopyFraction_4",
"CopyFraction_5", "CopyFraction_6"]
#%% ==========================================================
# make concat sig dataframe
# ============================================================
def load_data(snv_counts_path, indel_counts_path, cnv_counts_path):
df_snv = pd.read_csv(snv_counts_path, sep='\t', low_memory=False)
df_snv = df_snv[snv_categories]
df_snv["sample"] = df_snv["sample"].astype(str)
df_indel = pd.read_csv(indel_counts_path, sep='\t', low_memory=False)
df_indel = df_indel[indel_categories]
df_indel["sample"] = df_indel["sample"].astype(str)
df_cnv = pd.read_csv(cnv_counts_path, sep='\t', low_memory=False)
df_cnv = df_cnv[cnv_categories]
df_cnv["sample"] = df_cnv["sample"].astype(str)
df_sigs = pd.merge(df_snv, df_indel, on="sample", how='left').fillna(0)
df_sigs = pd.merge(df_sigs, df_cnv, on="sample", how='left').reset_index(drop=True)
return df_sigs
#%% ==========================================================
# need to separately handle minor ticks on a symlog axis. Taken from:
# https://stackoverflow.com/questions/20470892/how-to-place-minor-ticks-on-symlog-scale
# ============================================================
class MinorSymLogLocator(Locator):
"""
Dynamically find minor tick positions based on the positions of
major ticks for a symlog scaling.
"""
def __init__(self, linthresh):
"""
Ticks will be placed between the major ticks.
The placement is linear for x between -linthresh and linthresh,
otherwise it is logarithmic.
"""
self.linthresh = linthresh
def __call__(self):
'Return the locations of the ticks'
majorlocs = self.axis.get_majorticklocs()
# fill in minor tick locations between each pair of consecutive major ticks
minorlocs = []
for i in range(1, len(majorlocs)):
majorstep = majorlocs[i] - majorlocs[i-1]
if abs(majorlocs[i-1] + majorstep/2) < self.linthresh:
ndivs = 10
else:
ndivs = 9
minorstep = majorstep / ndivs
locs = np.arange(majorlocs[i-1], majorlocs[i], minorstep)[1:]
minorlocs.extend(locs)
return self.raise_if_exceeds(np.array(minorlocs))
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
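# Usage sketch (assumption, not part of the original script): attach the locator to
# a symlog-scaled axis so minor ticks are placed between the major ticks.
# (matplotlib >= 3.3 uses the `linthresh` keyword; older releases used `linthreshy`.)
_demo_fig, _demo_ax = plt.subplots()
_demo_ax.plot(np.linspace(-50, 50, 101), np.linspace(-50, 50, 101) ** 3)
_demo_ax.set_yscale('symlog', linthresh=1)
_demo_ax.yaxis.set_minor_locator(MinorSymLogLocator(1))
plt.close(_demo_fig)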
#%% ==========================================================
# some other fig settings that are the same for all 4 figures.
# ============================================================
def common_settings(fig, ax):
fig.set_size_inches(3.25, 1.1)
ax.set_xlabel("")
ax.tick_params(axis='y', which="major", length=2, labelsize=6, pad=1, reset=False)
ax.tick_params(axis='x', which="major", length=2, labelsize=7, pad=0, reset=False)
sns.despine(ax=ax, top=True, right=True, left=False, bottom=False)
ax.yaxis.set_label_coords(-0.08, 0.5)
ax.set_xticklabels(["DRwt", "ATMd", "CDK12d", "BRCA2d", "MMRd", "Bladder"])
fig.subplots_adjust(left=0.11, right=0.995, top=0.91, bottom=0.1)
return fig, ax
#%% ==========================================================
# get paths, load data and make df with each file merged
# ============================================================
# file paths relative to this script
rootdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
figdir = os.path.join(rootdir, "figures", "fig1")
datadir = os.path.join(rootdir, "data")
cohort_data = os.path.join(datadir, "cohort.tsv")
snv_features = os.path.join(datadir, "tns_features.tsv")
ndl_features = os.path.join(datadir, "ndl_features.tsv")
cnv_features = os.path.join(datadir, "cnv_features.tsv")
sigs = load_data(snv_features, ndl_features, cnv_features)
sample_labels = pd.read_csv(cohort_data, sep='\t', low_memory=False)
# sample_labels = sample_labels[sample_labels['manual check for usefullness (0=Fail)'] != 0]
df =
|
pd.merge(sample_labels, sigs, how='left', on='sample')
|
pandas.merge
|
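# Hedged sketch (not part of the dataset record above): a left merge keeps every row
# of the left frame and leaves unmatched feature columns as NaN, which load_data()
# above then replaces with 0 via .fillna(0).
import pandas as pd
labels = pd.DataFrame({'sample': ['s1', 's2', 's3'], 'group': ['A', 'B', 'A']})
feats = pd.DataFrame({'sample': ['s1', 's3'], 'CN_0': [4, 7]})
combined = pd.merge(labels, feats, how='left', on='sample')  # 's2' gets NaN for CN_0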
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt import settings
from vectorbt.utils.random import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
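# 86_400_000_000_000 ns == 86,400 s, i.e. one calendar day at pandas' nanosecond resolution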
day_dt = np.timedelta64(86400000000000)
settings.returns['year_freq'] = '252 days' # same as empyrical
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_process_order_nb():
# Errors, ignored and rejected orders
log_record = np.empty(1, dtype=log_dt)[0]
log_record[0] = 0
log_record[1] = 0
log_record[2] = 0
log_record[3] = 0
log_record[-1] = 0
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=0))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=1))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
-100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.nan, 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.inf, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.nan, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., -100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=0), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=np.nan), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=2), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., np.nan,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., -10.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., np.inf, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., -10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., np.nan, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 10., 10., 1100.,
nb.create_order_nb(size=0, price=10), log_record)
assert cash_now == 100.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1., raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1.), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.All), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False,
raise_reject=True),
log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 180.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 909.
assert shares_now == -100.
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 50.
assert shares_now == 4.9
assert_same_tuple(order_result, OrderResult(
size=4.9, price=10.0, fees=1., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 49.
assert shares_now == 5.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=1., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 100.,
nb.create_order_nb(size=1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., -10., 10., 100.,
nb.create_order_nb(size=-1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == -20.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
150., -5., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=0., side=1, status=0, status_info=-1))
# Logging
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.nan, 0, 2, np.nan, 0., 0., 0., 0., np.inf, 0.,
True, False, True, 100., 0., np.nan, np.nan, np.nan, -1, 1, 0, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 0., 10., 10., 10., 0., 0, 0, -1, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., -np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 200., -10., 10., 10., 0., 1, 0, -1, 0
))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_all(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='all', **kwargs)
def from_signals_longonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='shortonly', **kwargs)
class TestFromSignals:
def test_one_column(self):
record_arrays_close(
from_signals_all().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_signals_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 200., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 100., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0),
(2, 0, 1, 100., 1., 0., 1), (3, 3, 1, 50., 4., 0., 0),
(4, 0, 2, 100., 1., 0., 1), (5, 3, 2, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_size(self):
record_arrays_close(
from_signals_all(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 2.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 3, 2, 1.0, 4.0, 0.0, 0), (4, 0, 3, 100.0, 1.0, 0.0, 1), (5, 3, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=0.5, size_type='percent')
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True, accumulate=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 3, 0, 31.25, 4., 0., 1), (3, 4, 0, 15.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 25., 1., 0., 0),
(2, 0, 2, 12.5, 1., 0., 0), (3, 3, 0, 50., 4., 0., 1),
(4, 3, 1, 25., 4., 0., 1), (5, 3, 2, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 3, 0, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 3, 0, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 3, 0, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_signals_all(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.8, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.4, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.4, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_all(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.1, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_all(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 2.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 1.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 0.9, 0.0, 1),
(3, 3, 1, 1.0, 4.4, 0.0, 0), (4, 0, 2, 1.0, 0.0, 0.0, 1), (5, 3, 2, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_all(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_all(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 4, 0, 0.5, 5.0, 0.0, 1),
(3, 0, 1, 1.0, 1.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 1),
(6, 0, 2, 1.0, 1.0, 0.0, 0), (7, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1), (4, 0, 2, 1.0, 1.0, 0.0, 0), (5, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 3, 0, 0.5, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_all(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_close_first(self):
record_arrays_close(
from_signals_all(close_first=[[False, True]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1), (4, 4, 1, 80.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(
price=pd.Series(price.values[::-1], index=price.index),
entries=pd.Series(entries.values[::-1], index=price.index),
exits=pd.Series(exits.values[::-1], index=price.index),
close_first=[[False, True]]
).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1), (1, 3, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 20.0, 5.0, 0.0, 1),
(3, 3, 1, 20.0, 2.0, 0.0, 0), (4, 4, 1, 160.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1), (2, 3, 1, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 3, 0, 275.0, 4.0, 0.0, 0), (2, 0, 1, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 3, 0, 50.0, 4.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_accumulate(self):
record_arrays_close(
from_signals_all(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_log(self):
record_arrays_close(
from_signals_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 3, 0, 0, 0.0, 100.0, 4.0, 400.0, -np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 800.0, -100.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_conflict_mode(self):
kwargs = dict(
price=price.iloc[:3],
entries=pd.DataFrame([
[True, True, True, True, True],
[True, True, True, True, False],
[True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True],
[False, False, False, False, True],
[True, True, True, True, True]
]),
size=1.,
conflict_mode=[[
'ignore',
'entry',
'exit',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_all(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 1, 2, 2.0, 2.0, 0.0, 0), (4, 2, 2, 2.0, 3.0, 0.0, 1), (5, 1, 3, 1.0, 2.0, 0.0, 0),
(6, 2, 3, 2.0, 3.0, 0.0, 1), (7, 1, 4, 1.0, 2.0, 0.0, 1), (8, 2, 4, 2.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 1, 2, 1.0, 2.0, 0.0, 0),
(3, 2, 2, 1.0, 3.0, 0.0, 1), (4, 1, 3, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 1), (2, 1, 2, 1.0, 2.0, 0.0, 1),
(3, 2, 2, 1.0, 3.0, 0.0, 0), (4, 1, 3, 1.0, 2.0, 0.0, 1), (5, 2, 3, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_all(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 3, 0, 1.0, 4.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 3, 1, 2.0, 4.0, 0.0, 1),
(3, 0, 2, 1.0, 1.0, 0.0, 0), (4, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 1, 1.0, 1.0, 0.0, 0), (1, 3, 1, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 0.25, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 0.5, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
def test_cash_sharing(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
def test_call_seq(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
price=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
portfolio = from_signals_all(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 200.0, 1.0, 0.0, 1), (2, 1, 1, 200.0, 1.0, 0.0, 0),
(3, 2, 1, 400.0, 1.0, 0.0, 1), (4, 2, 0, 400.0, 1.0, 0.0, 0), (5, 3, 0, 800.0, 1.0, 0.0, 1),
(6, 3, 2, 800.0, 1.0, 0.0, 0), (7, 4, 2, 1400.0, 1.0, 0.0, 1), (8, 4, 1, 1400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_longonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_shortonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 1), (1, 1, 1, 200.0, 1.0, 0.0, 1), (2, 1, 2, 100.0, 1.0, 0.0, 0),
(3, 2, 0, 300.0, 1.0, 0.0, 1), (4, 2, 1, 200.0, 1.0, 0.0, 0), (5, 3, 2, 400.0, 1.0, 0.0, 1),
(6, 3, 0, 300.0, 1.0, 0.0, 0), (7, 4, 1, 500.0, 1.0, 0.0, 1), (8, 4, 2, 400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
portfolio = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
def test_max_orders(self):
_ = from_signals_all(price=price_wide)
_ = from_signals_all(price=price_wide, max_orders=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_all(price=price_wide, log=True)
_ = from_signals_all(price=price_wide, log=True, max_logs=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
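# from_holding should be equivalent to buying everything at the first bar and
# never exiting, which is exactly what the from_signals call below reproduces.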
class TestFromHolding:
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
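# from_random_signals generates signals either as an exact count per column (n)
# or as a per-bar probability (prob); with a fixed seed the result should match
# the hand-written signal arrays used in the comparisons below.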
class TestFromRandom:
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='rand_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples([(0.25, 0.25), (0.5, 0.5)], names=['rprob_entry_prob', 'rprob_exit_prob'])
)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
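# For from_orders, np.inf is treated as "buy with all available cash", -np.inf
# as "sell/short the maximum amount", and np.nan as "no order at this bar".
# The wrappers below only pin the direction argument.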
def from_orders_all(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='all', **kwargs)
def from_orders_longonly(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='longonly', **kwargs)
def from_orders_shortonly(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='shortonly', **kwargs)
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_all().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1), (8, 0, 2, 100.0, 1.0, 0.0, 0),
(9, 1, 2, 100.0, 2.0, 0.0, 1), (10, 3, 2, 50.0, 4.0, 0.0, 0), (11, 4, 2, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 0, 2, 100.0, 1.0, 0.0, 1), (5, 1, 2, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_all(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 198.01980198019803, 2.02, 0.0, 1),
(2, 3, 0, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 1),
(2, 3, 0, 49.504950495049506, 4.04, 0.0, 0), (3, 4, 0, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_orders_all(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 3, 1, 1.0, 4.0, 0.4, 1), (7, 4, 1, 1.0, 5.0, 0.5, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
(9, 1, 2, 1.0, 2.0, 2.0, 0), (10, 3, 2, 1.0, 4.0, 4.0, 1), (11, 4, 2, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_all(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 3, 1, 1.0, 4.0, 0.1, 1), (7, 4, 1, 1.0, 5.0, 0.1, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
(9, 1, 2, 1.0, 2.0, 1.0, 0), (10, 3, 2, 1.0, 4.0, 1.0, 1), (11, 4, 2, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_all(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
(9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
(9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 3, 1, 1.0, 3.6, 0.0, 1), (7, 4, 1, 1.0, 5.5, 0.0, 0), (8, 0, 2, 1.0, 0.0, 0.0, 1),
(9, 1, 2, 1.0, 4.0, 0.0, 0), (10, 3, 2, 1.0, 0.0, 0.0, 1), (11, 4, 2, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_all(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_all(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
(3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
(9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
(3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
(9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 1, 0, 0.5, 2.0, 0.0, 0), (2, 3, 0, 0.5, 4.0, 0.0, 1),
(3, 4, 0, 0.5, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0), (8, 0, 2, 1.0, 1.0, 0.0, 1),
(9, 1, 2, 1.0, 2.0, 0.0, 0), (10, 3, 2, 1.0, 4.0, 0.0, 1), (11, 4, 2, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_all(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 3, 1, 1.0, 4.0, 0.0, 0),
(6, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 3, 1, 1.0, 4.0, 0.0, 0), (5, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_orders_all(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
(3, 4, 0, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 4, 1, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
(3, 4, 0, 800.0, 5.0, 0.0, 0), (4, 0, 1, 1000.0, 1.0, 0.0, 1), (5, 3, 1, 1000.0, 4.0, 0.0, 1),
(6, 4, 1, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_all(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
(3, 4, 0, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
(3, 4, 0, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_orders_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 1, 0, 0, 0.0, 100.0, 2.0, 200.0, -np.inf, 0, 2, 2.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 400.0, -100.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 2, 0, 0, 400.0, -100.0, 3.0, 100.0, np.nan, 0, 2, 3.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 400.0, -100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 3, 0, 0, 400.0, -100.0, 4.0, 0.0, np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 4, 0, 0, 0.0, 0.0, 5.0, 0.0, -np.inf, 0, 2, 5.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
def test_cash_sharing(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 1, 1, 200.0, 2.0, 0.0, 1),
(3, 3, 0, 200.0, 4.0, 0.0, 0), (4, 4, 0, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
def test_call_seq(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 1, 1, 200.0, 2.0, 0.0, 1),
(3, 3, 0, 200.0, 4.0, 0.0, 0), (4, 4, 0, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = from_orders_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1), (2, 1, 0, 200.0, 2.0, 0.0, 1),
(3, 3, 1, 200.0, 4.0, 0.0, 0), (4, 4, 1, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = from_orders_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1), (2, 3, 1, 100.0, 4.0, 0.0, 0),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
price=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
portfolio = from_orders_all(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 200.0, 1.0, 0.0, 1), (2, 1, 1, 200.0, 1.0, 0.0, 0),
(3, 2, 1, 400.0, 1.0, 0.0, 1), (4, 2, 0, 400.0, 1.0, 0.0, 0), (5, 3, 0, 800.0, 1.0, 0.0, 1),
(6, 3, 2, 800.0, 1.0, 0.0, 0), (7, 4, 2, 1400.0, 1.0, 0.0, 1), (8, 4, 1, 1400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_orders_longonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_orders_shortonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 1), (1, 1, 1, 200.0, 1.0, 0.0, 1), (2, 1, 2, 100.0, 1.0, 0.0, 0),
(3, 2, 0, 300.0, 1.0, 0.0, 1), (4, 2, 1, 200.0, 1.0, 0.0, 0), (5, 3, 2, 400.0, 1.0, 0.0, 1),
(6, 3, 0, 300.0, 1.0, 0.0, 0), (7, 4, 1, 500.0, 1.0, 0.0, 1), (8, 4, 2, 400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_target_shares(self):
record_arrays_close(
from_orders_all(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=75., size_type='targetshares',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_all(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
(4, 4, 0, 2.5, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 2, 1, 8.333333333333332, 3.0, 0.0, 0),
(8, 3, 1, 4.166666666666668, 4.0, 0.0, 0), (9, 4, 1, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
(4, 4, 0, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 25.0, 2.0, 0.0, 0),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 0), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 0),
(4, 4, 0, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0),
(2, 1, 0, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 1, 2, 25.0, 2.0, 0.0, 0), (5, 2, 0, 8.333333333333332, 3.0, 0.0, 1),
(6, 2, 1, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 3, 0, 4.166666666666668, 4.0, 0.0, 1), (9, 3, 1, 4.166666666666668, 4.0, 0.0, 1),
(10, 3, 2, 4.166666666666668, 4.0, 0.0, 1), (11, 4, 0, 2.5, 5.0, 0.0, 1),
(12, 4, 1, 2.5, 5.0, 0.0, 1), (13, 4, 2, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_all(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
(3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 2, 1, 6.25, 3.0, 0.0, 0), (8, 3, 1, 2.34375, 4.0, 0.0, 0),
(9, 4, 1, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
(3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 37.5, 2.0, 0.0, 0), (2, 2, 0, 6.25, 3.0, 0.0, 0),
(3, 3, 0, 2.34375, 4.0, 0.0, 0), (4, 4, 0, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_all(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
(4, 4, 0, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
(4, 4, 0, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 0, 1, 2.50000000e+01, 1., 0., 0),
(2, 0, 2, 1.25000000e+01, 1., 0., 0), (3, 1, 0, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 1, 2, 7.81250000e-01, 2., 0., 0),
(6, 2, 0, 2.60416667e-01, 3., 0., 0), (7, 2, 1, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 3, 0, 2.44140625e-02, 4., 0., 0),
(10, 3, 1, 1.22070312e-02, 4., 0., 0), (11, 3, 2, 6.10351562e-03, 4., 0., 0),
(12, 4, 0, 2.44140625e-03, 5., 0., 0), (13, 4, 1, 1.22070312e-03, 5., 0., 0),
(14, 4, 2, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
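# With call_seq='auto' and cash sharing, columns that free up cash should be
# processed before columns that spend it, so the realized holding values are
# expected to match the targets exactly.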
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').holding_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').holding_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_all(price=price_wide)
_ = from_orders_all(price=price_wide, max_orders=9)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_all(price=price_wide, log=True)
_ = from_orders_all(price=price_wide, log=True, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, log=True, max_logs=14)
# ############# from_order_func ############# #
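# order_func_nb alternates between buying (even bars) and selling (odd bars) the
# given size at the close price; log_order_func_nb does the same but also emits
# a log record for every call.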
@njit
def order_func_nb(oc, size):
return nb.create_order_nb(size=size if oc.i % 2 == 0 else -size, price=oc.close[oc.i, oc.col])
@njit
def log_order_func_nb(oc, size):
return nb.create_order_nb(size=size if oc.i % 2 == 0 else -size, price=oc.close[oc.i, oc.col], log=True)
class TestFromOrderFunc:
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_one_column(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(price.tolist(), order_func_nb, np.inf, row_wise=test_row_wise)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(price, order_func_nb, np.inf, row_wise=test_row_wise)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_multiple_columns(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
(6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (5, 0, 1, 100.0, 1.0, 0.0, 0),
(6, 1, 1, 200.0, 2.0, 0.0, 1), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_shape(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5,), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise,
keys=pd.Index(['first'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0, 1, 2], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise,
keys=pd.Index(['first', 'second', 'third'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first', 'second', 'third'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_group_by(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf,
group_by=np.array([0, 0, 1]), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
(6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (5, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(6, 3, 0, 66.66666666666669, 4.0, 0.0, 1), (7, 3, 1, 66.66666666666669, 4.0, 0.0, 1),
(8, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_cash_sharing(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf,
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 0, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 0, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 0, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 0, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 1, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 1, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 2, 1, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 1, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 1, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 1, 2, 200.0, 2.0, 0.0, 1),
(4, 2, 1, 133.33333333333334, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (7, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 106.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1),
(2, 2, 1, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 1, 66.66666666666669, 4.0, 0.0, 1),
(4, 3, 0, 66.66666666666669, 4.0, 0.0, 1), (5, 4, 1, 106.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
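# segment_prep_func_nb converts the target holding values into target-value
# orders and sorts the call sequence via nb.sort_call_seq_nb, so that orders
# reducing value (and releasing cash) are executed before orders increasing it.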
@njit
def segment_prep_func_nb(sc, target_hold_value):
order_size = np.copy(target_hold_value[sc.i, sc.from_col:sc.to_col])
order_size_type = np.full(sc.group_len, SizeType.TargetValue)
direction = np.full(sc.group_len, Direction.All)
order_value_out = np.empty(sc.group_len, dtype=np.float_)
sc.last_val_price[sc.from_col:sc.to_col] = sc.close[sc.i, sc.from_col:sc.to_col]
nb.sort_call_seq_nb(sc, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
@njit
def pct_order_func_nb(oc, order_size, order_size_type, direction):
col_i = oc.call_seq_now[oc.call_idx]
return nb.create_order_nb(
size=order_size[col_i],
size_type=order_size_type[col_i],
price=oc.close[oc.i, col_i],
direction=direction[col_i]
)
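# pct_order_func_nb resolves the actual column from the current call sequence,
# so the sizes prepared in segment_prep_func_nb are routed to the right asset.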
portfolio = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, segment_prep_func_nb=segment_prep_func_nb,
segment_prep_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
portfolio.holding_value(group_by=False),
target_hold_value
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_value(self, test_row_wise):
@njit
def target_val_segment_prep_func_nb(sc, val_price):
sc.last_val_price[sc.from_col:sc.to_col] = val_price[sc.i]
return ()
@njit
def target_val_order_func_nb(oc):
return nb.create_order_nb(size=50., size_type=SizeType.TargetValue, price=oc.close[oc.i, oc.col])
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
segment_prep_func_nb=target_val_segment_prep_func_nb,
segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_percent(self, test_row_wise):
@njit
def target_pct_segment_prep_func_nb(sc, val_price):
sc.last_val_price[sc.from_col:sc.to_col] = val_price[sc.i]
return ()
@njit
def target_pct_order_func_nb(oc):
return nb.create_order_nb(size=0.5, size_type=SizeType.TargetPercent, price=oc.close[oc.i, oc.col])
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
segment_prep_func_nb=target_pct_segment_prep_func_nb,
segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 3, 0, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 3, 0, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_init_cash(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=[1., 10., np.inf])
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 1.0, 0.0, 0),
(2, 0, 2, 10.0, 1.0, 0.0, 0), (3, 1, 0, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 1, 2, 10.0, 2.0, 0.0, 1),
(6, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 3, 0, 10.0, 4.0, 0.0, 1),
(10, 3, 1, 10.0, 4.0, 0.0, 1), (11, 3, 2, 10.0, 4.0, 0.0, 1),
(12, 4, 0, 8.0, 5.0, 0.0, 0), (13, 4, 1, 8.0, 5.0, 0.0, 0),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 2.0, 0.0, 1),
(2, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (3, 3, 0, 10.0, 4.0, 0.0, 1),
(4, 4, 0, 8.0, 5.0, 0.0, 0), (5, 0, 1, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 3, 1, 10.0, 4.0, 0.0, 1), (9, 4, 1, 8.0, 5.0, 0.0, 0),
(10, 0, 2, 10.0, 1.0, 0.0, 0), (11, 1, 2, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 3, 2, 10.0, 4.0, 0.0, 1),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(portfolio._init_cash) == np.ndarray
base_portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=np.inf)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.Auto)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.Auto
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.AutoAlign)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.AutoAlign
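# With InitCashMode.Auto and InitCashMode.AutoAlign the simulation is expected to produce the
# same orders as the unlimited-cash run (base_portfolio above); only the inferred initial
# capital differs, which is why only order_records are compared here.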
def test_func_calls(self):
@njit
def prep_func_nb(simc, call_i, sim_lst):
call_i[0] += 1
sim_lst.append(call_i[0])
return (call_i,)
@njit
def group_prep_func_nb(gc, call_i, group_lst):
call_i[0] += 1
group_lst.append(call_i[0])
return (call_i,)
@njit
def segment_prep_func_nb(sc, call_i, segment_lst):
call_i[0] += 1
segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(oc, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
group_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,)
)
assert call_i[0] == 28
assert list(sim_lst) == [1]
assert list(group_lst) == [2, 18]
assert list(segment_lst) == [3, 6, 9, 12, 15, 19, 21, 23, 25, 27]
assert list(order_lst) == [4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 20, 22, 24, 26, 28]
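# Call count check: 1 simulation prep + 2 group preps + 10 segment preps (5 rows x 2 groups)
# + 15 order calls (5 rows x 3 columns) = 28 calls in total.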
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
group_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
active_mask = np.array([
[False, True],
[False, False],
[False, True],
[False, False],
[False, True],
])
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
active_mask=active_mask
)
assert call_i[0] == 8
assert list(sim_lst) == [1]
assert list(group_lst) == [2]
assert list(segment_lst) == [3, 5, 7]
assert list(order_lst) == [4, 6, 8]
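# With this active_mask only the second group is active, in rows 0, 2 and 4:
# 1 simulation prep + 1 group prep + 3 segment preps + 3 order calls = 8 calls.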
def test_func_calls_row_wise(self):
@njit
def prep_func_nb(simc, call_i, sim_lst):
call_i[0] += 1
sim_lst.append(call_i[0])
return (call_i,)
@njit
def row_prep_func_nb(gc, call_i, row_lst):
call_i[0] += 1
row_lst.append(call_i[0])
return (call_i,)
@njit
def segment_prep_func_nb(sc, call_i, segment_lst):
call_i[0] += 1
segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(oc, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
row_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
row_wise=True
)
assert call_i[0] == 31
assert list(sim_lst) == [1]
assert list(row_lst) == [2, 8, 14, 20, 26]
assert list(segment_lst) == [3, 6, 9, 12, 15, 18, 21, 24, 27, 30]
assert list(order_lst) == [4, 5, 7, 10, 11, 13, 16, 17, 19, 22, 23, 25, 28, 29, 31]
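# Row-wise call count: 1 simulation prep + 5 row preps + 10 segment preps (5 rows x 2 groups)
# + 15 order calls (5 rows x 3 columns) = 31 calls in total.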
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
row_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
active_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
active_mask=active_mask,
row_wise=True
)
assert call_i[0] == 14
assert list(sim_lst) == [1]
assert list(row_lst) == [2, 5, 9]
assert list(segment_lst) == [3, 6, 10, 13]
assert list(order_lst) == [4, 7, 8, 11, 12, 14]
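# With this active_mask only rows 1, 2 and 3 contain an active segment:
# 1 simulation prep + 3 row preps + 4 segment preps + 6 order calls = 14 calls.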
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_orders(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=14)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_logs(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=14)
# ############# Portfolio ############# #
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'all']
group_by = pd.Index(['first', 'first', 'second'], name='group')
portfolio = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D'
) # independent
portfolio_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D'
) # grouped
portfolio_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D'
) # shared
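# Three fixtures used by the tests below:
# - portfolio: three independent columns, each with 100 initial cash
# - portfolio_grouped: columns grouped into 'first' (a, b) and 'second' (c), no cash sharing
# - portfolio_shared: same grouping, but cash is shared within each group (200 and 100)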
class TestPortfolio:
def test_config(self, tmp_path):
assert vbt.Portfolio.loads(portfolio['a'].dumps()) == portfolio['a']
assert vbt.Portfolio.loads(portfolio.dumps()) == portfolio
portfolio.save(tmp_path / 'portfolio')
assert vbt.Portfolio.load(tmp_path / 'portfolio') == portfolio
def test_wrapper(self):
pd.testing.assert_index_equal(
portfolio.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
price_na.columns
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.grouper.group_by is None
assert portfolio.wrapper.grouper.allow_enable
assert portfolio.wrapper.grouper.allow_disable
assert portfolio.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.columns,
price_na.columns
)
assert portfolio_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.grouper.group_by,
group_by
)
assert portfolio_grouped.wrapper.grouper.allow_enable
assert portfolio_grouped.wrapper.grouper.allow_disable
assert portfolio_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_shared.wrapper.columns,
price_na.columns
)
assert portfolio_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_shared.wrapper.grouper.group_by,
group_by
)
assert not portfolio_shared.wrapper.grouper.allow_enable
assert portfolio_shared.wrapper.grouper.allow_disable
assert not portfolio_shared.wrapper.grouper.allow_modify
def test_indexing(self):
assert portfolio['a'].wrapper == portfolio.wrapper['a']
assert portfolio['a'].orders == portfolio.orders['a']
assert portfolio['a'].logs == portfolio.logs['a']
assert portfolio['a'].init_cash == portfolio.init_cash['a']
pd.testing.assert_series_equal(portfolio['a'].call_seq, portfolio.call_seq['a'])
assert portfolio['c'].wrapper == portfolio.wrapper['c']
assert portfolio['c'].orders == portfolio.orders['c']
assert portfolio['c'].logs == portfolio.logs['c']
assert portfolio['c'].init_cash == portfolio.init_cash['c']
pd.testing.assert_series_equal(portfolio['c'].call_seq, portfolio.call_seq['c'])
assert portfolio[['c']].wrapper == portfolio.wrapper[['c']]
assert portfolio[['c']].orders == portfolio.orders[['c']]
assert portfolio[['c']].logs == portfolio.logs[['c']]
pd.testing.assert_series_equal(portfolio[['c']].init_cash, portfolio.init_cash[['c']])
pd.testing.assert_frame_equal(portfolio[['c']].call_seq, portfolio.call_seq[['c']])
assert portfolio_grouped['first'].wrapper == portfolio_grouped.wrapper['first']
assert portfolio_grouped['first'].orders == portfolio_grouped.orders['first']
assert portfolio_grouped['first'].logs == portfolio_grouped.logs['first']
assert portfolio_grouped['first'].init_cash == portfolio_grouped.init_cash['first']
pd.testing.assert_frame_equal(portfolio_grouped['first'].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped[['first']].wrapper == portfolio_grouped.wrapper[['first']]
assert portfolio_grouped[['first']].orders == portfolio_grouped.orders[['first']]
assert portfolio_grouped[['first']].logs == portfolio_grouped.logs[['first']]
pd.testing.assert_series_equal(
portfolio_grouped[['first']].init_cash,
portfolio_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_grouped[['first']].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped['second'].wrapper == portfolio_grouped.wrapper['second']
assert portfolio_grouped['second'].orders == portfolio_grouped.orders['second']
assert portfolio_grouped['second'].logs == portfolio_grouped.logs['second']
assert portfolio_grouped['second'].init_cash == portfolio_grouped.init_cash['second']
pd.testing.assert_series_equal(portfolio_grouped['second'].call_seq, portfolio_grouped.call_seq['c'])
assert portfolio_grouped[['second']].wrapper == portfolio_grouped.wrapper[['second']]
assert portfolio_grouped[['second']].orders == portfolio_grouped.orders[['second']]
assert portfolio_grouped[['second']].logs == portfolio_grouped.logs[['second']]
pd.testing.assert_series_equal(
portfolio_grouped[['second']].init_cash,
portfolio_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_grouped[['second']].call_seq, portfolio_grouped.call_seq[['c']])
assert portfolio_shared['first'].wrapper == portfolio_shared.wrapper['first']
assert portfolio_shared['first'].orders == portfolio_shared.orders['first']
assert portfolio_shared['first'].logs == portfolio_shared.logs['first']
assert portfolio_shared['first'].init_cash == portfolio_shared.init_cash['first']
pd.testing.assert_frame_equal(portfolio_shared['first'].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared[['first']].wrapper == portfolio_shared.wrapper[['first']]
assert portfolio_shared[['first']].orders == portfolio_shared.orders[['first']]
assert portfolio_shared[['first']].logs == portfolio_shared.logs[['first']]
pd.testing.assert_series_equal(
portfolio_shared[['first']].init_cash,
portfolio_shared.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_shared[['first']].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared['second'].wrapper == portfolio_shared.wrapper['second']
assert portfolio_shared['second'].orders == portfolio_shared.orders['second']
assert portfolio_shared['second'].logs == portfolio_shared.logs['second']
assert portfolio_shared['second'].init_cash == portfolio_shared.init_cash['second']
pd.testing.assert_series_equal(portfolio_shared['second'].call_seq, portfolio_shared.call_seq['c'])
assert portfolio_shared[['second']].wrapper == portfolio_shared.wrapper[['second']]
assert portfolio_shared[['second']].orders == portfolio_shared.orders[['second']]
assert portfolio_shared[['second']].logs == portfolio_shared.logs[['second']]
pd.testing.assert_series_equal(
portfolio_shared[['second']].init_cash,
portfolio_shared.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_shared[['second']].call_seq, portfolio_shared.call_seq[['c']])
def test_regroup(self):
assert portfolio.regroup(None) == portfolio
assert portfolio.regroup(False) == portfolio
assert portfolio.regroup(group_by) != portfolio
pd.testing.assert_index_equal(portfolio.regroup(group_by).wrapper.grouper.group_by, group_by)
assert portfolio_grouped.regroup(None) == portfolio_grouped
assert portfolio_grouped.regroup(False) != portfolio_grouped
assert portfolio_grouped.regroup(False).wrapper.grouper.group_by is None
assert portfolio_grouped.regroup(group_by) == portfolio_grouped
assert portfolio_shared.regroup(None) == portfolio_shared
with pytest.raises(Exception) as e_info:
_ = portfolio_shared.regroup(False)
assert portfolio_shared.regroup(group_by) == portfolio_shared
def test_cash_sharing(self):
assert not portfolio.cash_sharing
assert not portfolio_grouped.cash_sharing
assert portfolio_shared.cash_sharing
def test_call_seq(self):
pd.testing.assert_frame_equal(
portfolio.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
def test_incl_unrealized(self):
assert not vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=False).incl_unrealized
assert vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=True).incl_unrealized
def test_orders(self):
record_arrays_close(
portfolio.orders.values,
np.array([
(0, 1, 0, 0.1, 2.02, 0.10202, 0), (1, 2, 0, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 4, 0, 1.0, 5.05, 0.1505, 0), (3, 0, 1, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 3, 1, 0.1, 4.04, 0.10404000000000001, 0),
(6, 4, 1, 1.0, 4.95, 0.14950000000000002, 1), (7, 0, 2, 1.0, 1.01, 0.1101, 0),
(8, 1, 2, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 3, 2, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.orders.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
portfolio.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, np.nan, 100.0, 1.0, 0, 0, np.nan, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.0, 0.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 1, 0, 0, 100.0, 0.0, 2.0, 100.0, 0.1, 0, 0, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.69598, 0.1, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 2, 0, 0, 99.69598, 0.1, 3.0, 99.99598, -1.0, 0, 0, 3.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.89001, 0.0, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 3, 0, 0, 99.89001, 0.0, 4.0, 99.89001, -0.1, 0, 0, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.89001, 0.0, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 4, 0, 0, 99.89001, 0.0, 5.0, 99.89001, 1.0, 0, 0, 5.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 94.68951, 1.0, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 0, 1, 1, 100.0, 0.0, 1.0, 100.0, 1.0, 0, 1, 1.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.8801, -1.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 2.0, 98.8801, 0.1, 0, 1, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.97612, -1.1, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 2, 1, 1, 100.97612, -1.1, np.nan, np.nan, -1.0, 0, 1, np.nan, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.97612, -1.1, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 3, 1, 1, 100.97612, -1.1, 4.0, 96.57611999999999, -0.1, 0, 1, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.46808, -1.0, 0.1, 4.04, 0.10404000000000001, 0, 0, -1, 5),
(9, 4, 1, 1, 100.46808, -1.0, 5.0, 95.46808, 1.0, 0, 1, 5.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 105.26858, -2.0, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 0, 2, 2, 100.0, 0.0, 1.0, 100.0, 1.0, 0, 2, 1.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 98.8799, 1.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 1, 2, 2, 98.8799, 1.0, 2.0, 100.8799, 0.1, 0, 2, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 98.57588000000001, 1.1, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 3.0, 101.87588000000001, -1.0, 0, 2, 3.0,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, True, False, True, 101.41618000000001,
0.10000000000000009, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 3, 2, 2, 101.41618000000001, 0.10000000000000009, 4.0, 101.81618000000002,
-0.1, 0, 2, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, True, False, True,
101.70822000000001, 0.0, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 4, 2, 2, 101.70822000000001, 0.0, np.nan, 101.70822000000001, 1.0, 0, 2, np.nan, 0.01, 0.1, 0.01,
1e-08, np.inf, 0.0, True, False, True, 101.70822000000001, 0.0, np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.logs.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.logs.count(),
result
)
def test_trades(self):
record_arrays_close(
portfolio.trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.trades.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.trades.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.trades.count(),
result
)
def test_positions(self):
record_arrays_close(
portfolio.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1)
], dtype=position_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.positions.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
portfolio.drawdowns.values,
np.array([
(0, 0, 0, 4, 4, 0), (1, 1, 0, 4, 4, 0), (2, 2, 2, 3, 4, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(portfolio.close, price_na)
pd.testing.assert_frame_equal(portfolio_grouped.close, price_na)
pd.testing.assert_frame_equal(portfolio_shared.close, price_na)
def test_fill_close(self):
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=False, bfill=False),
price_na
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=True, bfill=False),
price_na.ffill()
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=False, bfill=True),
price_na.bfill()
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=True, bfill=True),
price_na.ffill().bfill()
)
def test_share_flow(self):
pd.testing.assert_frame_equal(
portfolio.share_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.share_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.share_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.share_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.share_flow(),
result
)
def test_shares(self):
pd.testing.assert_frame_equal(
portfolio.shares(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.shares(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.shares(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.shares(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.shares(),
result
)
def test_pos_mask(self):
pd.testing.assert_frame_equal(
portfolio.pos_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.pos_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.pos_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.pos_mask(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.pos_mask(),
result
)
def test_pos_coverage(self):
pd.testing.assert_series_equal(
portfolio.pos_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('pos_coverage')
)
pd.testing.assert_series_equal(
portfolio.pos_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('pos_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('pos_coverage')
pd.testing.assert_series_equal(
portfolio.pos_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.pos_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.pos_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('pos_coverage')
pd.testing.assert_series_equal(
portfolio.pos_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.pos_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.pos_coverage(),
result
)
def test_cash_flow(self):
pd.testing.assert_frame_equal(
portfolio.cash_flow(short_cash=False),
pd.DataFrame(
np.array([
[0., -1.0999, -1.1201],
[-0.30402, -0.29998, -0.30402],
[0.19403, 0., 2.8403],
[0., 0.29996, 0.29204],
[-5.2005, -5.0995, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.cash_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash_flow(),
result
)
def test_init_cash(self):
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
portfolio_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
portfolio_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
portfolio.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
portfolio_shared.init_cash,
result
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
pd.Series(
np.array([14000., 14000., 14000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
def test_cash(self):
pd.testing.assert_frame_equal(
portfolio.cash(short_cash=False),
pd.DataFrame(
np.array([
[100., 98.9001, 98.8799],
[99.69598, 98.60012, 98.57588],
[99.89001, 98.60012, 101.41618],
[99.89001, 98.90008, 101.70822],
[94.68951, 93.80058, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[100., 100.8801, 98.8799],
[99.69598, 100.97612, 98.57588],
[99.89001, 100.97612, 101.41618],
[99.89001, 100.46808, 101.70822],
[94.68951, 105.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.cash(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(group_by=False),
pd.DataFrame(
np.array([
[200., 200.8801, 98.8799],
[199.69598, 200.97612, 98.57588],
[199.89001, 200.97612, 101.41618],
[199.89001, 200.46808, 101.70822],
[194.68951, 205.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[200.8801, 200.8801, 98.8799],
[200.6721, 200.97612, 98.57588000000001],
[200.86613, 200.6721, 101.41618000000001],
[200.35809, 200.35809, 101.70822000000001],
[199.95809, 205.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200.8801, 98.8799],
[200.6721, 98.57588],
[200.86613, 101.41618],
[200.35809, 101.70822],
[199.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.cash(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(),
result
)
def test_holding_value(self):
pd.testing.assert_frame_equal(
portfolio.holding_value(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.2, 0., 2.2],
[0., 0., 0.3],
[0., 0., 0.],
[5., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.holding_value(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 2.2, 0.],
[0., np.nan, 0.],
[0., 4., 0.],
[0., 10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.2, -2.2, 2.2],
[0., np.nan, 0.3],
[0., -4., 0.],
[5., -10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.holding_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.holding_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.holding_value(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-1., 1.],
[-2., 2.2],
[np.nan, 0.3],
[-4., 0.],
[-5., 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.holding_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.holding_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.holding_value(),
result
)
def test_gross_exposure(self):
pd.testing.assert_frame_equal(
portfolio.gross_exposure(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 0.01001202],
[0.00200208, 0., 0.02183062],
[0., 0., 0.00294938],
[0., 0., 0.],
[0.05015573, 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 0.01001, 0.],
[0., 0.02182537, 0.],
[0., np.nan, 0.],
[0., 0.03887266, 0.],
[0., 0.09633858, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -0.01021449, 0.01001202],
[0.00200208, -0.02282155, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.0421496, 0.],
[0.05015573, -0.11933092, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.gross_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.gross_exposure(group_by=False),
pd.DataFrame(
np.array([
[0., -0.00505305, 0.01001202],
[0.00100052, -0.01120162, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.02052334, 0.],
[0.02503887, -0.05440679, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.005003, 0.01001202],
[-0.01006684, 0.02183062],
[np.nan, 0.00294938],
[-0.02037095, 0.],
[-0.02564654, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.gross_exposure(),
result
)
def test_net_exposure(self):
result = pd.DataFrame(
np.array([
[0., -0.01001, 0.01001202],
[0.00200208, -0.02182537, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.03887266, 0.],
[0.05015573, -0.09633858, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.net_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.net_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.net_exposure(group_by=False),
pd.DataFrame(
np.array([
[0., -0.0050025, 0.01001202],
[0.00100052, -0.01095617, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.01971414, 0.],
[0.02503887, -0.04906757, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.00495344, 0.01001202],
[-0.00984861, 0.02183062],
[np.nan, 0.00294938],
[-0.01957348, 0.],
[-0.02323332, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.net_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.net_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.net_exposure(),
result
)
def test_value(self):
result = pd.DataFrame(
np.array([
[100., 99.8801, 99.8799],
[99.89598, 98.77612, 100.77588],
[99.89001, np.nan, 101.71618],
[99.89001, 96.46808, 101.70822],
[99.68951, 95.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.value(group_by=False),
pd.DataFrame(
np.array([
[200., 199.8801, 99.8799],
[199.89598, 198.77612, 100.77588],
[199.89001, np.nan, 101.71618],
[199.89001, 196.46808, 101.70822],
[199.68951, 195.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.value(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[199.8801, 199.8801, 99.8799],
[198.6721, 198.77612000000002, 100.77588000000002],
[np.nan, np.nan, 101.71618000000001],
[196.35809, 196.35809, 101.70822000000001],
[194.95809, 195.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[199.8801, 99.8799],
[198.6721, 100.77588],
[np.nan, 101.71618],
[196.35809, 101.70822],
[194.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.value(),
result
)
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
portfolio.total_profit(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
portfolio.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
portfolio.final_value(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
portfolio.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.final_value(),
result
)
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
portfolio.total_return(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
portfolio.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_return(),
result
)
def test_returns(self):
result = pd.DataFrame(
np.array([
[0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
[-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
[-5.97621646e-05, np.nan, 9.33060570e-03],
[0.00000000e+00, np.nan, -7.82569695e-05],
[-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(group_by=False),
pd.DataFrame(
np.array([
[0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
[-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
[-2.98655331e-05, np.nan, 9.33060570e-03],
[0.00000000e+00, np.nan, -7.82569695e-05],
[-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[0.0, -0.0005995000000000062, -1.20100000e-03],
[-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
[np.nan, np.nan, 9.33060570e-03],
[0.0, np.nan, -7.82569695e-05],
[-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-5.99500000e-04, -1.20100000e-03],
[-6.04362315e-03, 8.97057366e-03],
[np.nan, 9.33060570e-03],
[np.nan, -7.82569695e-05],
[-7.12983101e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(),
result
)
def test_active_returns(self):
result = pd.DataFrame(
np.array([
[0., -np.inf, -np.inf],
[-np.inf, -1.10398, 0.89598],
[-0.02985, np.nan, 0.42740909],
[0., np.nan, -0.02653333],
[-np.inf, -0.299875, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.active_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.active_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.active_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-np.inf, -np.inf],
[-1.208, 0.89598],
[np.nan, 0.42740909],
[np.nan, -0.02653333],
[-0.35, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.active_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.active_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.active_returns(),
result
)
def test_market_value(self):
result = pd.DataFrame(
np.array([
[100., 100., 100.],
[100., 200., 200.],
[150., 200., 300.],
[200., 400., 400.],
[250., 500., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.market_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_value(group_by=False),
pd.DataFrame(
np.array([
[200., 200., 100.],
[200., 400., 200.],
[300., 400., 300.],
[400., 800., 400.],
[500., 1000., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200., 100.],
[300., 200.],
[350., 300.],
[600., 400.],
[750., 400.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.market_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_value(),
result
)
def test_market_returns(self):
result = pd.DataFrame(
np.array([
[0., 0., 0.],
[0., 1., 1.],
[0.5, 0., 0.5],
[0.33333333, 1., 0.33333333],
[0.25, 0.25, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.market_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0., 0.],
[0.5, 1.],
[0.16666667, 0.5],
[0.71428571, 0.33333333],
[0.25, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.market_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_returns(),
result
)
def test_total_market_return(self):
result = pd.Series(
np.array([1.5, 4., 3.]),
index=price_na.columns
).rename('total_market_return')
pd.testing.assert_series_equal(
portfolio.total_market_return(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_market_return(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_market_return(group_by=False),
result
)
result = pd.Series(
np.array([2.75, 3.]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_market_return')
pd.testing.assert_series_equal(
portfolio.total_market_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_market_return(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_market_return(),
result
)
def test_return_method(self):
pd.testing.assert_frame_equal(
portfolio_shared.cumulative_returns(),
pd.DataFrame(
np.array([
[-0.0005995, -0.001201],
[-0.0066395, 0.0077588],
[-0.0066395, 0.0171618],
[-0.0066395, 0.0170822],
[-0.01372199, 0.0170822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
)
pd.testing.assert_frame_equal(
portfolio_shared.cumulative_returns(group_by=False),
pd.DataFrame(
np.array([
[0., -0.0005995, -0.001201],
[-0.0005201, -0.0061194, 0.0077588],
[-0.00054995, -0.0061194, 0.0171618],
[-0.00054995, -0.0061194, 0.0170822],
[-0.00155245, -0.01218736, 0.0170822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(),
pd.Series(
np.array([-20.82791491, 10.2576347]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(risk_free=0.01),
pd.Series(
np.array([-66.19490297745766, -19.873024060759022]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(year_freq='365D'),
pd.Series(
np.array([-25.06639947, 12.34506527]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(group_by=False),
pd.Series(
np.array([-11.058998255347488, -21.39151322377427, 10.257634695847853]),
index=price_na.columns
).rename('sharpe_ratio')
)
def test_stats(self):
pd.testing.assert_series_equal(
portfolio.stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00')  # api: pandas.Timedelta
# anhost pre-processing module
# About privacy:
# - This tool receives input containing personally identifiable information (PII),
#   which is removed at the earliest possible opportunity. The PII is used only to
#   ensure that each student has a single record.
# - This module is used for data conversion and PII scrambling. It is NOT used
# for actual data analysis. Input data for this module is NOT AVAILABLE to the
# general public, but the output is.
# - This module produces an intermediate file suitable for later use.
import os #used for file path joining
import pandas as pd
import numpy as np
# We use a configuration script to set it all up. Refer to
# config_template.py for template.
import config
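# A minimal sketch of the configuration shape assumed by the import loop below
# (hypothetical values; config_template.py is the authoritative reference):
#
# original_dir = "data/original"
# original_datasets = [
#     (2021, "2021-06-30 23:59", [("A", "scores_2021_secA.csv"),
#                                 ("B", "scores_2021_secB.csv")]),
# ]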
# Column name definitions
cn = {
"userid": "<User ID>",
"date": "<Tested on>",
"time": "<Taken Time>",
"score": "<Score>"
}
import_cns = [cn["userid"], cn["date"], cn["time"], cn["score"]]
export_sec_cns = ["year", "sec", "timestamp", "deadline"]
# Final dataframe to be exported (will contain no user ID and only minimum data
# necessary for further processing)
df_dtypes = np.dtype([
("year", int),
("sec", str),
("timestamp", np.datetime64),
("deadline", np.datetime64),
])
df = pd.DataFrame(np.empty(0, dtype=df_dtypes))
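# np.empty(0, dtype=df_dtypes) builds a zero-row structured array, so df starts out empty
# but already carries the intended column names and dtypes.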
# Actual import
# TODO: Move indexing to the process_data script. This file is supposed to be
# about data sanitization only. No other functions should be implemented.
for (year, deadline, sections) in config.original_datasets:
df_year = pd.DataFrame(np.empty(0, dtype=df_dtypes))
for (section, filename) in sections:
df_sub = pd.read_csv(os.path.join(config.original_dir, filename), usecols=import_cns,
converters={cn["userid"]: str, cn["date"]: str, cn["time"]: str, cn["score"]: int})
new_timestamp = pd.to_datetime(df_sub[cn["date"]] + " " + df_sub[cn["time"]])  # api: pandas.to_datetime
#!/usr/bin/env python
#import standard libraries
import obspy.imaging.beachball
import argparse
import datetime
import os
import csv
import pandas as pd
import numpy as np
import fnmatch
from geopy.distance import geodesic
from math import *
#from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib import path
class NewFile:
'''Creates a file object with associated uncertainty and event type'''
def __init__(self, filename, unc, event_type, source):
self.filename = filename
self.event_type = event_type
self.unc = unc
self.name = source
def maketime(timestring):
'''Used in argument parser below. Makes a datetime object from a timestring.'''
TIMEFMT = '%Y-%m-%dT%H:%M:%S'
DATEFMT = '%Y-%m-%d'
TIMEFMT2 = '%m-%d-%YT%H:%M:%S.%f'
outtime = None
try:
outtime = datetime.datetime.strptime(timestring, TIMEFMT)
except:
try:
outtime = datetime.datetime.strptime(timestring, DATEFMT)
except:
try:
outtime = datetime.datetime.strptime(timestring, TIMEFMT2)
except:
print('Could not parse time or date from %s' % timestring)
print (outtime)
return outtime
def infile(s):
'''Stores filename, event type, and uncertainty where provided from comma separated string.'''
default_uncertainty = 15
try:
infile,unc,etype = s.split(',')
unc = float(unc)
return (infile, unc, etype)
except:
try:
s = s.split(',')
infile, unc, etype = s[0], default_uncertainty, s[1]
return (infile, unc, etype)
except:
raise argparse.ArgumentTypeError('Input file information must be \
given as infile,unc,etype or as infile,etype')
def datelinecross(x):
''' Arguments: x - longitude value (positive or negative)
Returns: x - a positive longitude. Stays the same if the input was positive,
is changed to positive if the input was negative '''
if x<0:
return x+360
else:
return x
###############################################
### 9 ###
###############################################
## Written by GLM
def meridiancross(x):
''' Arguments: x - longitude value (positive or negative)
Returns: x - a longitude in the -180/180 domain '''
if x>180:
return x-360
else:
return x
def northcross(x):
''' Arguments: x - azimuth value in degrees
Returns: x - the azimuth shifted by +360 when it is below 90, so that
ranges crossing north (0/360) become continuous '''
if x<90:
return x+360
else:
return x
def unnorthcross(x):
''' Arguments: x - azimuth value in degrees (possibly shifted by northcross)
Returns: x - the azimuth wrapped back below 360 '''
if x>360:
return x-360
else:
return x
def zerothreesixty(data):
data['lon']=data.apply(lambda row: datelinecross(row['lon']),axis=1)
return data
def oneeighty(data):
data['lon']=data.apply(lambda row: meridiancross(row['lon']),axis=1)
return data
def northernaz(data):
data['az']=data.apply(lambda row: northcross(row['az']),axis=1)
return data
def notnorthanymore(data):
data['az']=data.apply(lambda row: unnorthcross(row['az']),axis=1)
return data
def writetofile(input_file, output_file, event_type, uncertainty, args, catalogs, file_no, seismo_thick, slabname, name):
''' Writes an input file object to the given output file.
Acquires the necessary columns from the file, calculates moment tensor information.
Eliminates rows of data that do not fall within the specified bounds
(date, magnitude, & location).
If the event type is an earthquake, the catalog is compared to all previously
entered catalogs. Duplicate events are removed from the subsequent entries
(prioritization is determined by the order in which catalogs are entered).
Writes filtered dataframe to output file and prints progress to console.
Arguments: input_file - input file from input or slab2database
output_file - file where new dataset will be written
event_type - two letter ID that indicates the type of data (AS, EQ, BA, etc)
uncertainty - the default uncertainty associated with this file or event type
args - arguments provided from command line (bounds, magnitude limits, etc)
catalogs - a list of EQ catalogs that are being written to this file
file_no - file number, used for making event IDs '''
in_file = open(input_file)
fcsv = (input_file[:-4]+'.csv')
# Reading .csv file into dataframe - all files must be in .csv format
try:
if input_file.endswith('.csv'):
data = pd.read_csv(input_file, low_memory=False)
        else:
            print ('Input file %s was not written to file. MUST BE IN .CSV FORMAT' % input_file)
            return
    except:
        print ('Could not read file %s. A header line of column labels \
followed by a delimited dataset is expected. Check file format to ensure this \
is such. All files must be in .csv format.' % input_file)
        return
if 'ID' in data.columns:
pass
elif 'id_no' in data.columns:
data['ID'] = data['id_no'].values
else:
start_ID = file_no*100000
stop_ID = start_ID + len(data)
ID = np.arange(start_ID, stop_ID, 1)
data['ID'] = ID
data = makeframe(data, fcsv, event_type, uncertainty, args, seismo_thick,slabname)
data = inbounds(args, data, slabname)
#If option is chosen at command line, removes duplicate entries for the same event
#alternate preference for global or regional catalogues depending upon input arguments
try:
regional_pref
except NameError:
pass
else:
try:
tup = (data, fcsv)
if len(catalogs) > 0:
for idx, row in enumerate(catalogs):
if fnmatch.fnmatch(row, '*global*'):
position = idx
name_of_file = row
if regional_pref == 0 and position != 0:
first_file = catalogs[0]
catalogs[position] = first_file
catalogs[0] = name_of_file
elif regional_pref == 1 and position != (len(catalogs)-1):
last_file = catalogs[(len(catalogs)-1)]
                    catalogs[position] = last_file
catalogs[(len(catalogs)-1)] = name_of_file
else:
pass
for cat in catalogs:
data = rid_matches(cat[0], data, cat[1], fcsv)
elif len(catalogs) == 0:
catalogs.append(tup)
except:
print ('If file contains earthquake information (event-type = EQ), \
required columns include: lat,lon,depth,mag,time. The columns of the current \
file: %s. Check file format to ensure these columns are present and properly \
labeled.' % data.columns)
#MF 8.9.16 add source to output file
try:
listints = data['ID'].values.astype(int)
except:
start_ID = file_no*100000
stop_ID = start_ID + len(data)
ID = np.arange(start_ID, stop_ID, 1)
data['id_no'] = data['ID'].values
data['ID'] = ID
data['src'] = name
write_data(data, output_file)
print ('The file: %s was written to %s' % (input_file, output_file))
print ('---------------------------------------------------------------------------------')
def castfloats(data):
'''Casts all numerical and nan values to floats to avoid error in calculations'''
data[['lat']] = data[['lat']].astype(float)
data[['lon']] = data[['lon']].astype(float)
data[['depth']] = data[['depth']].astype(float)
data[['unc']] = data[['unc']].astype(float)
if 'mag' in data.columns:
data[['mag']] = data[['mag']].astype(float)
if 'mrr' in data.columns:
data[['mrr']] = data[['mrr']].astype(float)
data[['mtt']] = data[['mtt']].astype(float)
data[['mpp']] = data[['mpp']].astype(float)
data[['mrt']] = data[['mrt']].astype(float)
data[['mrp']] = data[['mrp']].astype(float)
data[['mtp']] = data[['mtp']].astype(float)
if 'Paz' in data.columns and 'Ppl' in data.columns:
data[['Paz']] = data[['Paz']].astype(float)
data[['Ppl']] = data[['Ppl']].astype(float)
data[['Taz']] = data[['Taz']].astype(float)
data[['Tpl']] = data[['Tpl']].astype(float)
data[['S1']] = data[['S1']].astype(float)
data[['D1']] = data[['D1']].astype(float)
data[['R1']] = data[['R1']].astype(float)
data[['S2']] = data[['S2']].astype(float)
data[['D2']] = data[['D2']].astype(float)
data[['R2']] = data[['R2']].astype(float)
return data
def rid_nans(df):
'''Removes points where lat,lon,depth, or uncertainty values are not provided.'''
df = df[np.isfinite(df['lat'])]
df = df[np.isfinite(df['lon'])]
df = df[np.isfinite(df['depth'])]
df = df[np.isfinite(df['unc'])]
return df
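# Hedged illustration (added): castfloats() followed by rid_nans() is the cleanup
# pass used before writing -- numeric columns are coerced to float and rows missing
# lat, lon, depth, or unc are dropped.
import numpy as np
import pandas as pd
_demo_points = pd.DataFrame({'lat': ['10.5', '11.0'], 'lon': [120, 121],
                             'depth': [30, np.nan], 'unc': [15, 15]})
_demo_points = rid_nans(castfloats(_demo_points))
assert len(_demo_points) == 1 and _demo_points['lat'].dtype == float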
def write_data(df, output_file):
''' Arguments: df - filtered dataframe to be written to file
output_file - output file where data is to be written '''
# If file name does not exist, creates file and writes filtered dataframe to it
df = castfloats(df)
df = rid_nans(df)
if not os.path.isfile(output_file):
with open(output_file, 'w') as f:
df.to_csv(f, header=True, index=False, float_format='%0.3f', na_rep = float('nan'))
# If the output file already exists, new filtered data points are appended to
# existing information
else:
old = pd.read_csv(output_file)
all = pd.concat([old,df],sort=True)
all = castfloats(all)
all = rid_nans(all)
if len(df.columns) > len(old.columns):
all = all[df.columns]
else:
all = all[old.columns]
# Writes desired columns of a filtered dataframe to the output file
with open(output_file, 'w') as f:
all.to_csv(f, header=True, index=False, float_format='%0.3f', na_rep = float('nan'))
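# Hedged usage sketch (illustration only; writes to a temporary path chosen here,
# not one used by the original workflow): calling write_data() twice creates the
# file on the first call and appends on the second.
import os
import tempfile
import pandas as pd
_demo_out = os.path.join(tempfile.mkdtemp(), 'demo_output.csv')
_demo_rows = pd.DataFrame({'lat': [10.0], 'lon': [120.0], 'depth': [30.0], 'unc': [15.0]})
write_data(_demo_rows, _demo_out)   # file does not exist yet -> written with header
write_data(_demo_rows, _demo_out)   # file exists -> old rows are re-read and the new ones appended
assert len(pd.read_csv(_demo_out)) == 2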
def inbounds(args, data, slab):
    ''' Originally written by Ginvera, modified by MAF July 2016
        Arguments: args - input arguments provided from command line arguments
                   data - dataframe to be filtered based on bounds
        Returns: data - filtered dataframe based on bounds '''
# Eliminates data points that are not within specified bounds where provided
if 'time' in data.columns:
try:
            data['time'] = pd.to_datetime(data['time'])
import os
import glob
import pickle
import re
# Our numerical workhorses
import numpy as np
import pandas as pd
# Import the project utils
import sys
sys.path.insert(0, '../')
import NB_sortseq_utils as utils
# Import matplotlib stuff for plotting
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from IPython.core.pylabtools import figsize
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Logo-generating module
import anylogo
utils.set_plotting_style_emat()
#===============================================================================
# Set output directory based on the graphicspath.tex file to print in dropbox
#===============================================================================
output = 'output_figs/'
#------------------------------------------------------------------------------#
# directory where emat csv files are contained
#------------------------------------------------------------------------------#
# Create background array
gc = .508
background_array = pd.DataFrame([[(1 - gc) / 2, gc / 2, gc / 2, (1 - gc) / 2]])
import os
import shutil
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
# path to main/original dataset folder containing ~200k images
path = "dataset/CCPD_2019_first_part/"
source_dir = path + "ccpd_base/"
# path to output folders
train_dir = path + "train/"
test_dir = path + "test/"
valid_dir = path + "validation/"
folders = [train_dir, test_dir, valid_dir]
# get file names
files = sorted(os.listdir(source_dir))
# separate filename into sections and extract license plate area
areas = []
for i, f in enumerate(files):
f = f.split("-")
area = f[0].ljust(4, "0")
areas.append(int(area))
# bin the data into percentage ranges (e.g. 200 - 300 = 2% - 3% area)
bins = [0, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 3500]
binned = pd.cut(areas, bins=bins)
print(f"Binned:\n{binned.describe()}\n")
df = pd.DataFrame({"Area": areas, "Bin": binned, "Filename": files})
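# Hedged sketch (not part of the original script): one way to use the area bins is
# as the stratification key for train_test_split, so train/validation/test all see
# the same distribution of license-plate sizes. The 80/10/10 ratios and random_state
# are assumptions made for illustration.
train_files, rest_files, train_bins, rest_bins = train_test_split(
    files, binned, test_size=0.2, stratify=binned, random_state=42)
valid_files, test_files = train_test_split(
    rest_files, test_size=0.5, stratify=rest_bins, random_state=42)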
import pandas as pd
import numpy as np
from random import randrange
from datetime import timedelta
from datetime import datetime
import random
import string
from faker import Faker
countries = ["Казахстан", "Россия", "Италия", "Германия", "Китай", "США"]  # Kazakhstan, Russia, Italy, Germany, China, USA
regions = ["Алматы", "Нур-Султан", "Шымкент"]  # Almaty, Nur-Sultan, Shymkent
choices = ["да", "нет"]  # yes, no
hospitals = ["Многопрофильный медицинский центр", "домашний карантин", "стационар", "транзит", "вылет в швейцарию", "Городская инфекционная больница"]  # multidisciplinary medical center, home quarantine, inpatient care, transit, departure to Switzerland, city infectious diseases hospital
addresses = ["Сарайшык", "Кунаева", "Сейфуллина", "Рыскулова", "Абая", "Жангельдина", "Панфилова", "Торекулова", "Желтоксан", "Сыганак"]  # street names in Kazakhstan
def random_date(start, end):
"""
This function will return a random datetime between two datetime
objects.
"""
delta = end - start
int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
random_second = randrange(int_delta)
return start + timedelta(seconds=random_second)
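# Hedged illustration (added): random_date() draws a uniformly random second between
# the two endpoints, so the result always lies inside the half-open interval.
_demo_start = datetime(2020, 1, 1)
_demo_end = datetime(2020, 3, 1)
_demo_dt = random_date(_demo_start, _demo_end)
assert _demo_start <= _demo_dt < _demo_end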
def generate_fake_csv():
    # Columns (translated): entry date, flight, full name, national ID (IIN), date of birth,
    # passport number, citizenship, mobile phone number, places visited during the 14 days before
    # arrival in Kazakhstan (country, region, state, etc.), region, place of residence or expected
    # place of stay, place of work, located (yes/no), hospitalized (yes/no), place of hospitalization.
    columns=["Дата въезда","рейс", "ФИО", "ИИН", "Дата рождения", "Номер паспорта", "Гражданство", "Номер мобильного телефона", "Место и сроки пребывания в последние 14 дней до прибытия в Казахстан (укажите страну, область, штат и т.д.)", "регион", "Место жительство, либо предпологаемое место проживания", "Место работы", "Найден (да/нет)","Госпитализирован (да/нет)","Место госпитализации"]
    df = pd.DataFrame(columns=columns)
import os
import sys
path = '/net/levsha/share/sameer/github/mirnylab-experimental/sameer/'
subfolders = [item for item in os.listdir(path) if item[0]!='.']
for item in subfolders:
sys.path.insert(0,path+item)
sout = sys.stdout
with open(os.devnull, 'w') as devnull:
sys.stdout = devnull
import numpy as np
import pandas as pd
import mirnylib
from mirnylib.genome import Genome
import cooler
import cooltools.eigdecomp as eigdecomp
import fnmatch
from bioframe import bedslice, fetch_chromsizes, to_bigwig
import DNA_info
sys.stdout = sout
def condense_eigenvector(vector):
assert 'label' in vector.columns
assert not np.any(vector.label.isnull())
cluster_cond = np.logical_or(vector.chrom != vector.chrom.shift(1), vector.label != vector.label.shift(1))
vector['cluster'] = np.cumsum(cluster_cond)
condensed_vector = vector.groupby('cluster').agg({'chrom': lambda x: x.values[0], 'start':'min',
'end':'max', 'label': lambda x: x.values[0]})
assert np.all(condensed_vector['start'] <= condensed_vector['end'])
return condensed_vector
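# Hedged illustration (added): consecutive bins on the same chromosome that share a
# label collapse into a single interval, so three runs of labels give three rows.
_demo_vec = pd.DataFrame({'chrom': ['chr1', 'chr1', 'chr1', 'chr2'],
                          'start': [0, 10, 20, 0],
                          'end': [10, 20, 30, 10],
                          'label': ['A', 'A', 'B', 'B']})
assert len(condense_eigenvector(_demo_vec.copy())) == 3  # chr1-A, chr1-B, chr2-B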
def sort_by_eigenvalue(lams, vectors):
lam_list = []
vector_list = []
for reg, lambdas in lams.iterrows():
if fnmatch.fnmatch(reg, '*:*-*'):
chrom = reg[0:reg.find(':')]
start = int(reg[reg.find(':')+1:reg.find('-')])
end = int(reg[reg.find('-')+1:])
else:
chrom = reg
start, end = None, None
if start is None and end is None:
region_vector = vectors[vectors.chrom == chrom].copy(deep=True)
else:
region_vector = bedslice(vectors.groupby('chrom'), chrom, start, end)
if np.any(np.isnan(lambdas.values)):
srtd_idx = np.array([0,1,2])
else:
srtd_idx = np.argsort(-np.abs(lambdas.values))
region_vector[['E1', 'E2', 'E3']] = region_vector[['E1', 'E2', 'E3']].values[:, srtd_idx]
lam_list.append(lambdas.values[srtd_idx])
vector_list.append(region_vector)
    sorted_vectors = pd.concat(vector_list)
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pandas as pd
import numpy as np
from mars.tests.core import TestBase, parameterized, ExecutorForTest
from mars.dataframe.datasource.series import from_pandas as from_pandas_series
from mars.dataframe.datasource.dataframe import from_pandas as from_pandas_df
reduction_functions = dict(
sum=dict(func_name='sum', has_min_count=True),
prod=dict(func_name='prod', has_min_count=True),
min=dict(func_name='min', has_min_count=False),
max=dict(func_name='max', has_min_count=False),
mean=dict(func_name='mean', has_min_count=False),
var=dict(func_name='var', has_min_count=False),
std=dict(func_name='std', has_min_count=False),
)
@parameterized(**reduction_functions)
class TestReduction(TestBase):
def setUp(self):
self.executor = ExecutorForTest()
def compute(self, data, **kwargs):
return getattr(data, self.func_name)(**kwargs)
def testSeriesReduction(self):
data = pd.Series(np.random.randint(0, 8, (10,)), index=[str(i) for i in range(10)], name='a')
reduction_df1 = self.compute(from_pandas_series(data))
self.assertEqual(
self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
reduction_df2 = self.compute(from_pandas_series(data, chunk_size=6))
self.assertAlmostEqual(
self.compute(data), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
reduction_df3 = self.compute(from_pandas_series(data, chunk_size=3))
self.assertAlmostEqual(
self.compute(data), self.executor.execute_dataframe(reduction_df3, concat=True)[0])
reduction_df4 = self.compute(from_pandas_series(data, chunk_size=4), axis='index')
self.assertAlmostEqual(
self.compute(data, axis='index'), self.executor.execute_dataframe(reduction_df4, concat=True)[0])
data = pd.Series(np.random.rand(20), name='a')
data[0] = 0.1 # make sure not all elements are NAN
data[data > 0.5] = np.nan
reduction_df1 = self.compute(from_pandas_series(data, chunk_size=3))
self.assertAlmostEqual(
self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
reduction_df2 = self.compute(from_pandas_series(data, chunk_size=3), skipna=False)
self.assertTrue(
np.isnan(self.executor.execute_dataframe(reduction_df2, concat=True)[0]))
if self.has_min_count:
reduction_df3 = self.compute(from_pandas_series(data, chunk_size=3), skipna=False, min_count=2)
self.assertTrue(
np.isnan(self.executor.execute_dataframe(reduction_df3, concat=True)[0]))
reduction_df4 = self.compute(from_pandas_series(data, chunk_size=3), min_count=1)
self.assertAlmostEqual(
self.compute(data, min_count=1),
self.executor.execute_dataframe(reduction_df4, concat=True)[0])
reduction_df5 = self.compute(from_pandas_series(data, chunk_size=3), min_count=21)
self.assertTrue(
np.isnan(self.executor.execute_dataframe(reduction_df5, concat=True)[0]))
def testDataFrameReduction(self):
data = pd.DataFrame(np.random.rand(20, 10))
reduction_df1 = self.compute(from_pandas_df(data))
pd.testing.assert_series_equal(
self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
reduction_df2 = self.compute(from_pandas_df(data, chunk_size=3))
pd.testing.assert_series_equal(
self.compute(data), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
reduction_df3 = self.compute(from_pandas_df(data, chunk_size=6), axis='index', numeric_only=True)
pd.testing.assert_series_equal(
self.compute(data, axis='index', numeric_only=True),
self.executor.execute_dataframe(reduction_df3, concat=True)[0])
reduction_df4 = self.compute(from_pandas_df(data, chunk_size=3), axis=1)
pd.testing.assert_series_equal(
self.compute(data, axis=1),
self.executor.execute_dataframe(reduction_df4, concat=True)[0])
# test null
np_data = np.random.rand(20, 10)
np_data[np_data > 0.6] = np.nan
data = pd.DataFrame(np_data)
reduction_df1 = self.compute(from_pandas_df(data, chunk_size=3))
pd.testing.assert_series_equal(
self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
reduction_df2 = self.compute(from_pandas_df(data, chunk_size=3), skipna=False)
pd.testing.assert_series_equal(
self.compute(data, skipna=False), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
reduction_df2 = self.compute(from_pandas_df(data, chunk_size=3), skipna=False)
pd.testing.assert_series_equal(
self.compute(data, skipna=False), self.executor.execute_dataframe(reduction_df2, concat=True)[0])
if self.has_min_count:
reduction_df3 = self.compute(from_pandas_df(data, chunk_size=3), min_count=15)
pd.testing.assert_series_equal(
self.compute(data, min_count=15),
self.executor.execute_dataframe(reduction_df3, concat=True)[0])
reduction_df4 = self.compute(from_pandas_df(data, chunk_size=3), min_count=3)
pd.testing.assert_series_equal(
self.compute(data, min_count=3),
self.executor.execute_dataframe(reduction_df4, concat=True)[0])
reduction_df5 = self.compute(from_pandas_df(data, chunk_size=3), axis=1, min_count=3)
pd.testing.assert_series_equal(
self.compute(data, axis=1, min_count=3),
self.executor.execute_dataframe(reduction_df5, concat=True)[0])
reduction_df5 = self.compute(from_pandas_df(data, chunk_size=3), axis=1, min_count=8)
pd.testing.assert_series_equal(
self.compute(data, axis=1, min_count=8),
self.executor.execute_dataframe(reduction_df5, concat=True)[0])
# test numeric_only
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
reduction_df1 = self.compute(from_pandas_df(data, chunk_size=2))
pd.testing.assert_series_equal(
self.compute(data), self.executor.execute_dataframe(reduction_df1, concat=True)[0])
reduction_df2 = self.compute(from_pandas_df(data, chunk_size=6), axis='index', numeric_only=True)
pd.testing.assert_series_equal(
self.compute(data, axis='index', numeric_only=True),
self.executor.execute_dataframe(reduction_df2, concat=True)[0])
reduction_df3 = self.compute(from_pandas_df(data, chunk_size=3), axis='columns')
pd.testing.assert_series_equal(
self.compute(data, axis='columns'),
self.executor.execute_dataframe(reduction_df3, concat=True)[0])
data_dict = dict((str(i), np.random.rand(10)) for i in range(10))
data_dict['string'] = [str(i) for i in range(10)]
data_dict['bool'] = np.random.choice([True, False], (10,))
data = pd.DataFrame(data_dict)
reduction_df = self.compute(from_pandas_df(data, chunk_size=3), axis='index', numeric_only=True)
pd.testing.assert_series_equal(
self.compute(data, axis='index', numeric_only=True),
self.executor.execute_dataframe(reduction_df, concat=True)[0])
class TestCount(TestBase):
def setUp(self):
self.executor = ExecutorForTest()
def testSeriesCount(self):
array = np.random.rand(10)
array[[2, 7, 9]] = np.nan
data = pd.Series(array)
series = from_pandas_series(data)
result = self.executor.execute_dataframe(series.count(), concat=True)[0]
expected = data.count()
self.assertEqual(result, expected)
series2 = from_pandas_series(data, chunk_size=1)
result = self.executor.execute_dataframe(series2.count(), concat=True)[0]
expected = data.count()
self.assertEqual(result, expected)
series2 = from_pandas_series(data, chunk_size=3)
result = self.executor.execute_dataframe(series2.count(), concat=True)[0]
expected = data.count()
self.assertEqual(result, expected)
def testDataFrameCount(self):
data = pd.DataFrame({
"Person": ["John", "Myla", "Lewis", "John", "Myla"],
"Age": [24., np.nan, 21., 33, 26],
"Single": [False, True, True, True, False]})
df = from_pandas_df(data)
result = self.executor.execute_dataframe(df.count(), concat=True)[0]
expected = data.count()
pd.testing.assert_series_equal(result, expected)
result = self.executor.execute_dataframe(df.count(axis='columns'), concat=True)[0]
expected = data.count(axis='columns')
pd.testing.assert_series_equal(result, expected)
df2 = from_pandas_df(data, chunk_size=2)
result = self.executor.execute_dataframe(df2.count(), concat=True)[0]
expected = data.count()
pd.testing.assert_series_equal(result, expected)
result = self.executor.execute_dataframe(df2.count(axis='columns'), concat=True)[0]
expected = data.count(axis='columns')
pd.testing.assert_series_equal(result, expected)
df3 = from_pandas_df(data, chunk_size=3)
result = self.executor.execute_dataframe(df3.count(numeric_only=True), concat=True)[0]
expected = data.count(numeric_only=True)
        pd.testing.assert_series_equal(result, expected)
# Parameters
XGB_WEIGHT = 0.6200
BASELINE_WEIGHT = 0.0200
OLS_WEIGHT = 0.0700
NN_WEIGHT = 0.0600
XGB1_WEIGHT = 0.8000 # Weight of first in combination of two XGB models
BASELINE_PRED = 0.0115 # Baseline based on mean of training data, per Oleg
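# Hedged sketch (assumption -- the actual blending happens further down in the full
# script and is not shown in this excerpt): the weights above are meant to form a
# convex combination of the model predictions, with LightGBM implicitly receiving
# whatever weight is left over. The helper name below is made up for illustration.
def blend_predictions_sketch(xgb_pred, lgb_pred, nn_pred, ols_pred):
    lgb_weight = 1.0 - XGB_WEIGHT - BASELINE_WEIGHT - NN_WEIGHT - OLS_WEIGHT
    return (XGB_WEIGHT * xgb_pred
            + BASELINE_WEIGHT * BASELINE_PRED
            + NN_WEIGHT * nn_pred
            + OLS_WEIGHT * ols_pred
            + lgb_weight * lgb_pred)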
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
import gc
from sklearn.linear_model import LinearRegression
import random
import datetime as dt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout, BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.optimizers import Adam
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Imputer
##### READ IN RAW DATA
print( "\nReading data from disk ...")
prop = pd.read_csv('../input/properties_2016.csv')
train = pd.read_csv("../input/train_2016_v2.csv")
################
################
## LightGBM ##
################
################
# This section is (I think) originally derived from SIDHARTH's script:
# https://www.kaggle.com/sidharthkumar/trying-lightgbm
# which was forked and tuned by <NAME>:
# https://www.kaggle.com/yuqingxue/lightgbm-85-97
# and updated by me (<NAME>):
# https://www.kaggle.com/aharless/lightgbm-with-outliers-remaining
# and a lot of additional changes have happened since then
##### PROCESS DATA FOR LIGHTGBM
print( "\nProcessing data for LightGBM ..." )
for c, dtype in zip(prop.columns, prop.dtypes):
if dtype == np.float64:
prop[c] = prop[c].astype(np.float32)
df_train = train.merge(prop, how='left', on='parcelid')
df_train.fillna(df_train.median(),inplace = True)
x_train = df_train.drop(['parcelid', 'logerror', 'transactiondate', 'propertyzoningdesc',
'propertycountylandusecode', 'fireplacecnt', 'fireplaceflag'], axis=1)
#x_train['Ratio_1'] = x_train['taxvaluedollarcnt']/x_train['taxamount']
y_train = df_train['logerror'].values
print(x_train.shape, y_train.shape)
train_columns = x_train.columns
for c in x_train.dtypes[x_train.dtypes == object].index.values:
x_train[c] = (x_train[c] == True)
del df_train; gc.collect()
x_train = x_train.values.astype(np.float32, copy=False)
d_train = lgb.Dataset(x_train, label=y_train)
##### RUN LIGHTGBM
params = {}
params['max_bin'] = 10
params['learning_rate'] = 0.0021 # shrinkage_rate
params['boosting_type'] = 'gbdt'
params['objective'] = 'regression'
params['metric'] = 'l1' # or 'mae'
params['sub_feature'] = 0.345 # feature_fraction (small values => use very different submodels)
params['bagging_fraction'] = 0.85 # sub_row
params['bagging_freq'] = 40
params['num_leaves'] = 512 # num_leaf
params['min_data'] = 500 # min_data_in_leaf
params['min_hessian'] = 0.05 # min_sum_hessian_in_leaf
params['verbose'] = 0
params['feature_fraction_seed'] = 2
params['bagging_seed'] = 3
np.random.seed(0)
random.seed(0)
print("\nFitting LightGBM model ...")
clf = lgb.train(params, d_train, 430)
del d_train; gc.collect()
del x_train; gc.collect()
print("\nPrepare for LightGBM prediction ...")
print(" Read sample file ...")
sample = pd.read_csv('../input/sample_submission.csv')
print(" ...")
sample['parcelid'] = sample['ParcelId']
print(" Merge with property data ...")
df_test = sample.merge(prop, on='parcelid', how='left')
print(" ...")
del sample, prop; gc.collect()
print(" ...")
#df_test['Ratio_1'] = df_test['taxvaluedollarcnt']/df_test['taxamount']
x_test = df_test[train_columns]
print(" ...")
del df_test; gc.collect()
print(" Preparing x_test...")
for c in x_test.dtypes[x_test.dtypes == object].index.values:
x_test[c] = (x_test[c] == True)
print(" ...")
x_test = x_test.values.astype(np.float32, copy=False)
print("\nStart LightGBM prediction ...")
p_test = clf.predict(x_test)
del x_test; gc.collect()
print( "\nUnadjusted LightGBM predictions:" )
print( pd.DataFrame(p_test).head() )
################
################
## XGBoost ##
################
################
# This section is (I think) originally derived from Infinite Wing's script:
# https://www.kaggle.com/infinitewing/xgboost-without-outliers-lb-0-06463
# inspired by this thread:
# https://www.kaggle.com/c/zillow-prize-1/discussion/33710
# but the code has gone through a lot of changes since then
##### RE-READ PROPERTIES FILE
##### (I tried keeping a copy, but the program crashed.)
print( "\nRe-reading properties file ...")
properties = pd.read_csv('../input/properties_2016.csv')
##### PROCESS DATA FOR XGBOOST
print( "\nProcessing data for XGBoost ...")
for c in properties.columns:
properties[c]=properties[c].fillna(-1)
if properties[c].dtype == 'object':
lbl = LabelEncoder()
lbl.fit(list(properties[c].values))
properties[c] = lbl.transform(list(properties[c].values))
train_df = train.merge(properties, how='left', on='parcelid')
x_train = train_df.drop(['parcelid', 'logerror','transactiondate'], axis=1)
x_test = properties.drop(['parcelid'], axis=1)
# shape
print('Shape train: {}\nShape test: {}'.format(x_train.shape, x_test.shape))
# drop out ouliers
train_df=train_df[ train_df.logerror > -0.4 ]
train_df=train_df[ train_df.logerror < 0.419 ]
x_train=train_df.drop(['parcelid', 'logerror','transactiondate'], axis=1)
y_train = train_df["logerror"].values.astype(np.float32)
y_mean = np.mean(y_train)
print('After removing outliers:')
print('Shape train: {}\nShape test: {}'.format(x_train.shape, x_test.shape))
##### RUN XGBOOST
print("\nSetting up data for XGBoost ...")
# xgboost params
xgb_params = {
'eta': 0.037,
'max_depth': 5,
'subsample': 0.80,
'objective': 'reg:linear',
'eval_metric': 'mae',
'lambda': 0.8,
'alpha': 0.4,
'base_score': y_mean,
'silent': 1
}
dtrain = xgb.DMatrix(x_train, y_train)
dtest = xgb.DMatrix(x_test)
num_boost_rounds = 250
print("num_boost_rounds="+str(num_boost_rounds))
# train model
print( "\nTraining XGBoost ...")
model = xgb.train(dict(xgb_params, silent=1), dtrain, num_boost_round=num_boost_rounds)
print( "\nPredicting with XGBoost ...")
xgb_pred1 = model.predict(dtest)
print( "\nFirst XGBoost predictions:" )
print( pd.DataFrame(xgb_pred1).head() )
##### RUN XGBOOST AGAIN
print("\nSetting up data for XGBoost ...")
# xgboost params
xgb_params = {
'eta': 0.033,
'max_depth': 6,
'subsample': 0.80,
'objective': 'reg:linear',
'eval_metric': 'mae',
'base_score': y_mean,
'silent': 1
}
num_boost_rounds = 150
print("num_boost_rounds="+str(num_boost_rounds))
print( "\nTraining XGBoost again ...")
model = xgb.train(dict(xgb_params, silent=1), dtrain, num_boost_round=num_boost_rounds)
print( "\nPredicting with XGBoost again ...")
xgb_pred2 = model.predict(dtest)
print( "\nSecond XGBoost predictions:" )
print( pd.DataFrame(xgb_pred2).head() )
##### COMBINE XGBOOST RESULTS
xgb_pred = XGB1_WEIGHT*xgb_pred1 + (1-XGB1_WEIGHT)*xgb_pred2
#xgb_pred = xgb_pred1
print( "\nCombined XGBoost predictions:" )
print( pd.DataFrame(xgb_pred).head() )
del train_df
del x_train
del x_test
del properties
del dtest
del dtrain
del xgb_pred1
del xgb_pred2
gc.collect()
######################
######################
## Neural Network ##
######################
######################
# Neural network copied from this script:
# https://www.kaggle.com/aharless/keras-neural-network-lb-06492 (version 20)
# which was built on the skeleton in this notebook:
# https://www.kaggle.com/prasunmishra/ann-using-keras
# Read in data for neural network
print( "\n\nProcessing data for Neural Network ...")
print('\nLoading train, prop and sample data...')
train = pd.read_csv("../input/train_2016_v2.csv", parse_dates=["transactiondate"])
prop = pd.read_csv('../input/properties_2016.csv')
sample = pd.read_csv('../input/sample_submission.csv')
print('Fitting Label Encoder on properties...')
for c in prop.columns:
prop[c]=prop[c].fillna(-1)
if prop[c].dtype == 'object':
lbl = LabelEncoder()
lbl.fit(list(prop[c].values))
prop[c] = lbl.transform(list(prop[c].values))
print('Creating training set...')
df_train = train.merge(prop, how='left', on='parcelid')
df_train["transactiondate"] = pd.to_datetime(df_train["transactiondate"])
df_train["transactiondate_year"] = df_train["transactiondate"].dt.year
df_train["transactiondate_month"] = df_train["transactiondate"].dt.month
df_train['transactiondate_quarter'] = df_train['transactiondate'].dt.quarter
df_train["transactiondate"] = df_train["transactiondate"].dt.day
print('Filling NA/NaN values...' )
df_train = df_train.fillna(-1.0)
print('Creating x_train and y_train from df_train...' )
x_train = df_train.drop(['parcelid', 'logerror', 'transactiondate', 'propertyzoningdesc', 'propertycountylandusecode','fireplacecnt', 'fireplaceflag'], axis=1)
y_train = df_train["logerror"]
y_mean = np.mean(y_train)
print(x_train.shape, y_train.shape)
train_columns = x_train.columns
for c in x_train.dtypes[x_train.dtypes == object].index.values:
x_train[c] = (x_train[c] == True)
print('Creating df_test...')
sample['parcelid'] = sample['ParcelId']
print("Merging Sample with property data...")
df_test = sample.merge(prop, on='parcelid', how='left')
df_test["transactiondate"] = pd.to_datetime('2016-11-15') # placeholder value for preliminary version
df_test["transactiondate_year"] = df_test["transactiondate"].dt.year
df_test["transactiondate_month"] = df_test["transactiondate"].dt.month
df_test['transactiondate_quarter'] = df_test['transactiondate'].dt.quarter
df_test["transactiondate"] = df_test["transactiondate"].dt.day
x_test = df_test[train_columns]
print('Shape of x_test:', x_test.shape)
print("Preparing x_test...")
for c in x_test.dtypes[x_test.dtypes == object].index.values:
x_test[c] = (x_test[c] == True)
## Preprocessing
print("\nPreprocessing neural network data...")
imputer= Imputer()
imputer.fit(x_train.iloc[:, :])
x_train = imputer.transform(x_train.iloc[:, :])
imputer.fit(x_test.iloc[:, :])
x_test = imputer.transform(x_test.iloc[:, :])
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
len_x=int(x_train.shape[1])
print("len_x is:",len_x)
# Neural Network
print("\nSetting up neural network model...")
nn = Sequential()
nn.add(Dense(units = 400 , kernel_initializer = 'normal', input_dim = len_x))
nn.add(PReLU())
nn.add(Dropout(.4))
nn.add(Dense(units = 160 , kernel_initializer = 'normal'))
nn.add(PReLU())
nn.add(BatchNormalization())
nn.add(Dropout(.63))
nn.add(Dense(units = 64 , kernel_initializer = 'normal'))
nn.add(PReLU())
nn.add(BatchNormalization())
nn.add(Dropout(.45))
nn.add(Dense(units = 28, kernel_initializer = 'normal'))
nn.add(PReLU())
nn.add(BatchNormalization())
nn.add(Dropout(.5))
nn.add(Dense(1, kernel_initializer='normal'))
nn.compile(loss='mae', optimizer=Adam(lr=4e-3, decay=1e-4))
print("\nFitting neural network model...")
nn.fit(np.array(x_train), np.array(y_train), batch_size = 32, epochs = 70, verbose=2)
print("\nPredicting with neural network model...")
#print("x_test.shape:",x_test.shape)
y_pred_ann = nn.predict(x_test)
print( "\nPreparing results for write..." )
nn_pred = y_pred_ann.flatten()
print( "Type of nn_pred is ", type(nn_pred) )
print( "Shape of nn_pred is ", nn_pred.shape )
print( "\nNeural Network predictions:" )
print( pd.DataFrame(nn_pred).head() )
# Cleanup
del train
del prop
del sample
del x_train
del x_test
del df_train
del df_test
del y_pred_ann
gc.collect()
################
################
## OLS ##
################
################
# This section is derived from the1owl's notebook:
# https://www.kaggle.com/the1owl/primer-for-the-zillow-pred-approach
# which I (<NAME>) updated and made into a script:
# https://www.kaggle.com/aharless/updated-script-version-of-the1owl-s-basic-ols
np.random.seed(17)
random.seed(17)
print( "\n\nProcessing data for OLS ...")
train = pd.read_csv("../input/train_2016_v2.csv", parse_dates=["transactiondate"])
from collections import deque
from datetime import datetime
import operator
import numpy as np
import pytest
import pytz
import pandas as pd
import pandas._testing as tm
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
with pytest.raises(TypeError):
x >= y
with pytest.raises(TypeError):
x > y
with pytest.raises(TypeError):
x < y
with pytest.raises(TypeError):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(TypeError):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
        df3 = pd.DataFrame({"a": arr3})
import pandas as pd
import datetime as dt
import yaml
from psaw import PushshiftAPI
pushshift = PushshiftAPI()
class RedditCollector():
"""
Class to collect reddit data.
"""
_REQUIRED_CONFIG = [
"query",
"start_date",
"end_date"
]
def __init__(self, config):
self.pushshift = pushshift
if not isinstance(config, dict):
raise TypeError("config must be a dict")
if not all(element in config for element in self._REQUIRED_CONFIG):
raise ValueError(f"config must contain keys: {', '.join(self._REQUIRED_CONFIG)}")
self.config = config
self.config["start_date"] = int(dt.datetime.strptime(self.config["start_date"], "%Y/%m/%d").timestamp())
self.config["end_date"] = int(dt.datetime.strptime(self.config["end_date"], "%Y/%m/%d").timestamp())
def collect_submissions(self):
"""
Collect reddit submissions for the given configuration
parameters
"""
gen = self.pushshift.search_submissions(
q=self.config["query"],
after=self.config["start_date"],
before=self.config["end_date"]
)
submissions = pd.DataFrame([obj.d_ for obj in gen])
return submissions
def collect_comments(self):
"""
Collect reddit comments for the given configuration
parameters.
"""
gen = self.pushshift.search_comments(
q=self.config["query"],
after=self.config["start_date"],
before=self.config["end_date"]
)
        comments = pd.DataFrame([obj.d_ for obj in gen])
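# Hedged usage sketch (illustration only; the query string and dates are invented,
# and running it needs network access to the Pushshift API):
if __name__ == "__main__":
    demo_config = {
        "query": "python",
        "start_date": "2021/01/01",
        "end_date": "2021/01/07",
    }
    collector = RedditCollector(demo_config)
    print(collector.collect_submissions().head())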
"""Tests for `models` module."""
import pytest
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from pipelitools.models import models as m
@pytest.fixture(scope="function")
def df_binary():
X_train, y_train = make_classification(n_samples=100, n_features=2, n_informative=2,
n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1,
class_sep=2, flip_y=0, weights=[0.5, 0.5], random_state=1)
X_test, y_test = make_classification(n_samples=50, n_features=2, n_informative=2,
n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1,
class_sep=2, flip_y=0, weights=[0.5, 0.5], random_state=2)
y_train = pd.Series(y_train)
y_test = pd.Series(y_test)
return X_train, y_train, X_test, y_test
@pytest.fixture(scope="function")
def df_multiclass():
X_train, y_train = make_classification(n_samples=100, n_features=2, n_informative=2,
n_redundant=0, n_repeated=0, n_classes=3, n_clusters_per_class=1,
class_sep=2, flip_y=0, weights=[0.2, 0.3, 0.5], random_state=1)
X_test, y_test = make_classification(n_samples=50, n_features=2, n_informative=2,
n_redundant=0, n_repeated=0, n_classes=3, n_clusters_per_class=1,
class_sep=2, flip_y=0, weights=[0.3, 0.3, 0.4], random_state=2)
    y_train = pd.Series(y_train)
import os
import numpy as np
import pandas as pd
import statsmodels.api as sm
from .analogy_utils import pu, pa, pv
from sklearn.linear_model import Ridge, LinearRegression
def calc_scores(fmri_data, regressors):
results = sm.OLS(fmri_data, regressors).fit()
return results.params[0], results.tvalues[0]
def load_func(paths, settings, sub,
maskname="graymatter-bin_mask",
logger=None):
labels = []
imgs = []
# will need to mask image
for ri, r in enumerate(settings["subjects"][sub]):
imgs.append(
os.path.join(
paths['root'], 'derivatives', sub, 'func',
settings["templates"]["func"].format(sub, r)))
labels.append(pu.load_labels(
paths["root"], 'derivatives', sub, 'func',
settings["templates"]["events"].format(sub, r), logger=logger))
labels = pd.concat(labels).reset_index(drop=True)
fmri_data = pu.concat_imgs(imgs, logger=logger)
return fmri_data, labels
def create_lss_from_lsa(design, trial, tt="AB"):
nuisance_regressor_1 = (design[tt]
.drop(columns=trial)
.sum(axis=1))
nuisance_regressor_2 = (design
.drop(columns="nuisance")
.drop(columns=tt)
.sum(axis=1))
motion_regressor = design["nuisance"]
regressor = design[tt, trial]
regressors = pd.concat([regressor, nuisance_regressor_1,
nuisance_regressor_2, motion_regressor], axis=1)
return regressors
def load_aggregated_data(paths, sub, maskname, logger=None):
mask = pu.load_img(
os.path.join(paths["root"], "derivatives", sub, "masks",
"{}.nii.gz".format(maskname)), logger=logger)
fmri_data = []
des_mats = []
labels = []
s = sub
for r in ["run-01", "run-02", "run-03", "run-04",
"run-05", "run-06", "run-07", "run-08"]:
data, des, label = load_run_and_regressors(paths, s, r, mask)
fmri_data.append(data - data.mean(axis=0))
# sort by index then join
tags = []
tt = []
for d in des.names:
if ":" in d:
ttype = d.split("_")[1]
tt.append(ttype)
trial = d.split("_")[0]
if ttype == "AB":
tags.append(trial.split("::")[0])
elif ttype == "CD":
tags.append(trial.split("::")[1])
elif ttype == "Probe":
tags.append(trial)
else:
tags.append(d)
tt.append("nuisance")
headers = pd.MultiIndex.from_tuples(
zip(tt, tags), names=["type", "tag"])
        des_mats.append(pd.DataFrame(des.matrix, columns=headers))
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import random
import seaborn as sns
import pandas as pd
import csv
import matplotlib.pyplot as plt
class game:
def __init__(self, length):
self.length = length
self.blue = 0
self.red = length + 1
self.time = 0
self.completed = False
self.winner = ""
pass
def __repr__(self):
s = "t: "
s = s + str(self.time)
s = s + "\nBlue: "
s = s + str(self.blue)
s = s + "\nRed: "
s = s + str(self.red)
return s
def step(self):
# here we simulate a step
self.time += 1
if(self.blue + 1 > self.red - 1):
# resolve conflict
self.resolve_conflict()
else:
self.blue = self.blue + 1
self.red = self.red - 1
self.check_for_end()
def resolve_conflict(self):
# dist = self.red - self.blue
# if dist == 1:
# Meet halfway
rps = self.play_r_p_s()
if rps == 0:
# Tie
return
if rps == 1:
# Blue wins
self.red = self.length + 1
self.blue += 1
return
if rps == 2:
# Red Wins
self.red -= 1
self.blue = 0
def check_for_end(self):
self.completed = (self.blue == self.length) \
or (self.red == 1)
if(self.completed):
# print("WINNER")
if(self.blue <= 1):
self.winner = "Red"
else:
self.winner = "Blue"
def play_r_p_s(self):
        # random.randint is inclusive on both ends, so (0, 2) gives the three
        # intended outcomes: 0 = tie, 1 = blue wins, 2 = red wins
        return random.randint(0, 2)
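# Hedged usage sketch (added): run one short game to completion and report which
# side reached the other end first and how many steps it took.
_demo_game = game(10)
while not _demo_game.completed:
    _demo_game.step()
print("winner:", _demo_game.winner, "after", _demo_game.time, "steps")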
list_of_means = []
list_of_medians = []
# for how_many_hoops in range(5,200, 20):
for how_many_hoops in range(5,600, 10):
red_win = []
blue_win = []
for i in range(1,20):
print(how_many_hoops)
x = game(how_many_hoops)
while(not x.completed):
x.step()
# print(x.winner)
# print(x.time)
if x.winner == "Blue":
blue_win.append(x.time)
else:
red_win.append(x.time)
    blue_win = pd.DataFrame(blue_win)
import calendar
from datetime import datetime
import locale
import unicodedata
import numpy as np
import pytest
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timedelta,
Timestamp,
date_range,
offsets,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
class TestDatetime64:
def test_no_millisecond_field(self):
msg = "type object 'DatetimeIndex' has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex.millisecond
msg = "'DatetimeIndex' object has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex([]).millisecond
def test_datetimeindex_accessors(self):
dti_naive = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
# GH#13303
dti_tz = date_range(
freq="D", start=datetime(1998, 1, 1), periods=365, tz="US/Eastern"
)
for dti in [dti_naive, dti_tz]:
assert dti.year[0] == 1998
assert dti.month[0] == 1
assert dti.day[0] == 1
assert dti.hour[0] == 0
assert dti.minute[0] == 0
assert dti.second[0] == 0
assert dti.microsecond[0] == 0
assert dti.dayofweek[0] == 3
assert dti.dayofyear[0] == 1
assert dti.dayofyear[120] == 121
assert dti.isocalendar().week[0] == 1
assert dti.isocalendar().week[120] == 18
assert dti.quarter[0] == 1
assert dti.quarter[120] == 2
assert dti.days_in_month[0] == 31
assert dti.days_in_month[90] == 30
assert dti.is_month_start[0]
assert not dti.is_month_start[1]
assert dti.is_month_start[31]
assert dti.is_quarter_start[0]
assert dti.is_quarter_start[90]
assert dti.is_year_start[0]
assert not dti.is_year_start[364]
assert not dti.is_month_end[0]
assert dti.is_month_end[30]
assert not dti.is_month_end[31]
assert dti.is_month_end[364]
assert not dti.is_quarter_end[0]
assert not dti.is_quarter_end[30]
assert dti.is_quarter_end[89]
assert dti.is_quarter_end[364]
assert not dti.is_year_end[0]
assert dti.is_year_end[364]
assert len(dti.year) == 365
assert len(dti.month) == 365
assert len(dti.day) == 365
assert len(dti.hour) == 365
assert len(dti.minute) == 365
assert len(dti.second) == 365
assert len(dti.microsecond) == 365
assert len(dti.dayofweek) == 365
assert len(dti.dayofyear) == 365
assert len(dti.isocalendar()) == 365
assert len(dti.quarter) == 365
assert len(dti.is_month_start) == 365
assert len(dti.is_month_end) == 365
assert len(dti.is_quarter_start) == 365
assert len(dti.is_quarter_end) == 365
assert len(dti.is_year_start) == 365
assert len(dti.is_year_end) == 365
dti.name = "name"
# non boolean accessors -> return Index
for accessor in DatetimeArray._field_ops:
if accessor in ["week", "weekofyear"]:
# GH#33595 Deprecate week and weekofyear
continue
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, Index)
assert res.name == "name"
# boolean accessors -> return array
for accessor in DatetimeArray._bool_ops:
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, np.ndarray)
# test boolean indexing
res = dti[dti.is_quarter_start]
exp = dti[[0, 90, 181, 273]]
tm.assert_index_equal(res, exp)
res = dti[dti.is_leap_year]
exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name")
tm.assert_index_equal(res, exp)
def test_datetimeindex_accessors2(self):
dti = date_range(freq="BQ-FEB", start=datetime(1998, 1, 1), periods=4)
assert sum(dti.is_quarter_start) == 0
assert sum(dti.is_quarter_end) == 4
assert sum(dti.is_year_start) == 0
assert sum(dti.is_year_end) == 1
def test_datetimeindex_accessors3(self):
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay,
bday_egypt = offsets.CustomBusinessDay(weekmask="Sun Mon Tue Wed Thu")
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
msg = "Custom business days is not supported by is_month_start"
with pytest.raises(ValueError, match=msg):
dti.is_month_start
def test_datetimeindex_accessors4(self):
dti = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"])
assert dti.is_month_start[0] == 1
def test_datetimeindex_accessors5(self):
with tm.assert_produces_warning(FutureWarning, match="The 'freq' argument"):
tests = [
(Timestamp("2013-06-01", freq="M").is_month_start, 1),
(Timestamp("2013-06-01", freq="BM").is_month_start, 0),
(Timestamp("2013-06-03", freq="M").is_month_start, 0),
(Timestamp("2013-06-03", freq="BM").is_month_start, 1),
(Timestamp("2013-02-28", freq="Q-FEB").is_month_end, 1),
(Timestamp("2013-02-28", freq="Q-FEB").is_quarter_end, 1),
(Timestamp("2013-02-28", freq="Q-FEB").is_year_end, 1),
(Timestamp("2013-03-01", freq="Q-FEB").is_month_start, 1),
(Timestamp("2013-03-01", freq="Q-FEB").is_quarter_start, 1),
(Timestamp("2013-03-01", freq="Q-FEB").is_year_start, 1),
(Timestamp("2013-03-31", freq="QS-FEB").is_month_end, 1),
(Timestamp("2013-03-31", freq="QS-FEB").is_quarter_end, 0),
(
                    Timestamp("2013-03-31", freq="QS-FEB")
# ******************************************************************************
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
""" Module containing the main Dash app code.
The main function in this module is configureApp() which starts
the Dash app on a Flask server and configures it with callbacks.
"""
import os
import dash
import pandas as pd
from flask import Flask
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from termcolor import cprint
import appLayout
import plotFunc
def configureApp(Task):
app = start_app(Task)
@app.callback(Output("save_head_layer", "children"), [Input("head_matrix", "clickData")])
def save_head_matrix_click(click_data):
if click_data is None:
return_dict = dict(head_no="6", layer_no="9")
else:
x, y = get_click_coords(click_data)
return_dict = dict(head_no=x, layer_no=y)
return return_dict
@app.callback(
[Output("head_matrix", "figure"), Output("range_slider_copy", "value")],
[
Input("save_clicked_point", "children"),
Input("save_selected_points", "children"),
Input("head_matrix_options", "value"),
Input("save_head_layer", "children"),
Input("auto_rescale", "value"),
Input("range_slider", "value"),
],
[State("multiselection", "children")],
)
def plot_head_summary(
saved_click,
saved_points,
head_matrix_option,
head_layer,
auto_rescale,
range_slider,
multiselection,
):
ctx = dash.callback_context
input_trigger = get_input_trigger(ctx)
if input_trigger == "save_selected_points" or multiselection:
saved_points = pd.read_json(saved_points, orient="split")
indices = saved_points["id"]
else:
saved_click = pd.read_json(saved_click, orient="split").iloc[0]
indices = [saved_click["id"]]
z = Task.get_head_matrix(head_matrix_option, indices)
if auto_rescale == "Manual":
z_max = range_slider[1]
z_min = range_slider[0]
else:
z_max = z.max()
z_min = z.min()
range_slider = [z_min, z_max]
fig = plotFunc.plot_head_matrix(z, head_layer, z_max, z_min)
return fig, range_slider
@app.callback(
Output("range_slider", "value"),
[Input("range_slider_copy", "value")],
)
def copy_range_slider_value(range_slider):
return range_slider
@app.callback(
Output("attn_head", "figure"),
[
Input("save_clicked_point", "children"),
Input("save_head_layer", "children"),
Input("attn_map_options", "value"),
Input("attn_map_toggle", "value"),
],
)
def plot_attn_map(saved_click, head_layer, attn_map_option, attn_map_toggle):
saved_click = pd.read_json(saved_click, orient="split").iloc[0]
layer = int(head_layer["layer_no"])
head = int(head_layer["head_no"])
example_id = saved_click["id"]
attns, tokens = Task.get_attn_map(attn_map_option, example_id)
head_id = attns[layer][head]
disabled_tokens = ["[CLS]", "[SEP]"]
if attn_map_toggle == "map":
src = plotFunc.plot_attn_map(tokens, head_id, disabled_tokens)
elif attn_map_toggle == "matrix":
src = plotFunc.plot_attn_matrix(tokens, head_id)
return src
@app.callback(
Output("tsneMap", "figure"),
[
Input("tsne_plot_options", "value"),
Input("layer_slider", "value"),
Input("save_clicked_point", "children"),
Input("model_selector", "value"),
],
)
def plot_tsne(dropdown_color_option, layer_slider_val, saved_click, model_selector_val):
saved_click = pd.read_json(saved_click, orient="split")
other_rows, selected_rows = Task.get_tsne_rows(saved_click, model_selector_val)
df_column_to_plot = Task.get_df_col_to_plot()
dropdown_to_df_col_map = (
Task.get_dropdown_to_df_col_map()
) # Maps dropdown options to dataframe columns
val_to_label = Task.get_val_to_label_map(
dropdown_color_option
) # Maps values for the column to labels based on dropdown_color_option
figure = plotFunc.plot_tsne(
df_column_to_plot,
other_rows,
dropdown_color_option,
layer_slider_val,
selected_rows,
dropdown_to_df_col_map,
val_to_label,
)
return figure
@app.callback(
Output("save_clicked_point", "children"),
[Input("tsneMap", "clickData")],
[State("layer_slider", "value"), State("model_selector", "value")],
)
def save_tsne_map_click(click_data, layer_slider_val, model_selector_val):
if click_data is None:
# Default row to use on startup
df_temp = Task.map_model_to_df["model1"]
# Selecting as default the example of the paper
selected_row = (
df_temp[df_temp["sentence"].str.contains("got back")].head(1)
if Task.get_name() == "wsc"
else df_temp.head(1)
)
else:
# Querying row based on tsne map click
selected_point = pd.DataFrame(click_data["points"])[["x", "y"]]
x_coord = selected_point["x"].iloc[0]
y_coord = selected_point["y"].iloc[0]
model_id = Task.map_model_name_to_id[model_selector_val]
curr_model_df = Task.map_model_to_df[model_id]
selected_row = curr_model_df[
(curr_model_df[f"layer_{layer_slider_val:02}_tsne_x"] == x_coord)
& (curr_model_df[f"layer_{layer_slider_val:02}_tsne_y"] == y_coord)
]
# Saving row corresponding to clicked aspect
return selected_row.to_json(orient="split")
@app.callback(
Output("save_selected_points", "children"),
[Input("tsneMap", "selectedData")],
[State("layer_slider", "value"), State("model_selector", "value")],
prevent_initial_call=True,
)
def save_tsne_map_selection(selected_data, layer_slider_val, model_selector_val):
if selected_data is None:
# Don't do anything on startup
raise PreventUpdate
else:
# Querying rows based on tsne map selection
selected_points = pd.DataFrame(selected_data["points"])[["x", "y"]]
model_id = Task.map_model_name_to_id[model_selector_val]
selected_rows = Task.map_model_to_df[model_id].merge(
selected_points,
left_on=[
f"layer_{layer_slider_val:02}_tsne_x",
f"layer_{layer_slider_val:02}_tsne_y",
],
right_on=["x", "y"],
)
# Saving row corresponding to clicked aspect
return selected_rows.to_json(orient="split")
@app.callback(
Output("multiselection", "children"),
[
Input("tsneMap", "clickData"),
Input("tsneMap", "selectedData"),
],
)
def set_multiselection_bool(click_data, selected_data):
if click_data is None:
# Default to false on startup
return False
# Checking trigger
ctx = dash.callback_context
input_trigger = get_input_trigger_full(ctx)
if input_trigger == "tsneMap.selectedData":
return True
else:
return False
@app.callback(Output("sentence", "children"), [Input("save_clicked_point", "children")])
def display_sentence(saved_click):
saved_click = pd.read_json(saved_click, orient="split")
return saved_click["sentence"]
@app.callback(Output("table", "data"), [Input("save_clicked_point", "children")])
def update_summary_table(saved_click):
saved_click = pd.read_json(saved_click, orient="split").iloc[0]
return_list = Task.get_summary_table(saved_click)
return return_list
if Task.get_name() == "wsc":
@app.callback(
Output("layer_dist_plot", "figure"),
[
Input("save_clicked_point", "children"),
Input("save_selected_points", "children"),
Input("layer_dist_plot_options", "value"),
Input("model_selector", "value"),
],
[State("multiselection", "children")],
)
def plot_dist_per_layer(
saved_click, saved_points, option, model_selector_val, multiselection
):
ctx = dash.callback_context
input_trigger = get_input_trigger(ctx)
if input_trigger == "save_selected_points" or multiselection:
            saved_points = pd.read_json(saved_points, orient="split")
# -*- coding: utf-8 -*-
""" test function application """
import pytest
from string import ascii_lowercase
from pandas import (date_range, Timestamp,
Index, MultiIndex, DataFrame, Series)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.compat import product as cart_product
import numpy as np
import pandas.util.testing as tm
import pandas as pd
from .common import MixIn
# describe
# --------------------------------
class TestDescribe(MixIn):
def test_apply_describe_bug(self):
grouped = self.mframe.groupby(level='first')
grouped.describe() # it works!
def test_series_describe_multikey(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
assert_series_equal(result['mean'], grouped.mean(), check_names=False)
assert_series_equal(result['std'], grouped.std(), check_names=False)
assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
assert_series_equal(result, expected)
def test_series_index_name(self):
grouped = self.df.loc[:, ['C']].groupby(self.df['A'])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == 'A'
def test_frame_describe_multikey(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in self.tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
labels=[[0] * len(group.columns), range(len(group.columns))])
group = pd.DataFrame(group.values,
columns=group_col,
index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = self.tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
expected = self.tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
labels=[[0, 0, 1, 1], range(len(expected.index))])
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex(self):
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
'y': [10, 20, 30, 40, 50] * 3,
'z': [100, 200, 300, 400, 500] * 3})
df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={'k': 'key'})
pytest.raises(ValueError, lambda: df1.groupby('k').describe())
pytest.raises(ValueError, lambda: df2.groupby('key').describe())
def test_frame_describe_unstacked_format(self):
# GH 4792
prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
df = pd.DataFrame({'PRICE': prices,
'VOLUME': volumes})
result = df.groupby('PRICE').VOLUME.describe()
data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
expected = pd.DataFrame(data,
index=pd.Index([24990, 25499], name='PRICE'),
columns=['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# nunique
# --------------------------------
class TestNUnique(MixIn):
def test_series_groupby_nunique(self):
def check_nunique(df, keys, as_index=True):
for sort, dropna in cart_product((False, True), repeat=2):
gr = df.groupby(keys, as_index=as_index, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, as_index=as_index, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
if not as_index:
right = right.reset_index(drop=True)
assert_series_equal(left, right, check_names=False)
days = date_range('2015-08-23', periods=10)
for n, m in cart_product(10 ** np.arange(2, 6), (10, 100, 1000)):
frame = DataFrame({
'jim': np.random.choice(
list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)
})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
check_nunique(frame, ['jim'], as_index=False)
check_nunique(frame, ['jim', 'joe'], as_index=False)
def test_nunique(self):
df = DataFrame({
'A': list('abbacc'),
'B': list('abxacc'),
'C': list('abbacx'),
})
expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
result = df.groupby('A', as_index=False).nunique()
tm.assert_frame_equal(result, expected)
# as_index
expected.index = list('abc')
expected.index.name = 'A'
result = df.groupby('A').nunique()
tm.assert_frame_equal(result, expected)
# with na
result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
tm.assert_frame_equal(result, expected)
# dropna
expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3},
index=list('abc'))
expected.index.name = 'A'
result = df.replace({'x': None}).groupby('A').nunique()
        tm.assert_frame_equal(result, expected)
import asyncio
import os
import uuid
import pandas as pd
import pytest
from storey import build_flow, CSVSource, CSVTarget, SyncEmitSource, Reduce, Map, FlatMap, AsyncEmitSource, ParquetTarget
from .integration_test_utils import _generate_table_name
has_azure_credentials = os.getenv("AZURE_ACCOUNT_NAME") and os.getenv("AZURE_ACCOUNT_KEY") and os.getenv("AZURE_BLOB_STORE")
if has_azure_credentials:
storage_options = {"account_name": os.getenv("AZURE_ACCOUNT_NAME"), "account_key": os.getenv("AZURE_ACCOUNT_KEY")}
from adlfs import AzureBlobFileSystem
@pytest.fixture()
def azure_create_csv():
# Setup
azure_blob = os.getenv("AZURE_BLOB_STORE")
file_path = _generate_table_name(f'{azure_blob}/az_storey')
_write_test_csv(file_path)
# Test runs
yield file_path
# Teardown
_delete_file(file_path)
@pytest.fixture()
def azure_teardown_file():
# Setup
azure_blob = os.getenv("AZURE_BLOB_STORE")
file_path = _generate_table_name(f'{azure_blob}/az_storey')
# Test runs
yield file_path
# Teardown
_delete_file(file_path)
@pytest.fixture()
def azure_setup_teardown_test():
# Setup
table_name = _generate_table_name(f'{os.getenv("AZURE_BLOB_STORE")}/test')
# Test runs
yield table_name
# Teardown
azure_recursive_delete(table_name)
def _write_test_csv(file_path):
az_fs = AzureBlobFileSystem(**storage_options)
data = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
with az_fs.open(file_path, 'w') as f:
f.write(data)
def _delete_file(path):
az_fs = AzureBlobFileSystem(**storage_options)
az_fs.delete(path)
def azure_recursive_delete(path):
az_fs = AzureBlobFileSystem(**storage_options)
az_fs.rm(path, True)
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_csv_reader_from_azure(azure_create_csv):
controller = build_flow([
CSVSource(f'az:///{azure_create_csv}', header=True, storage_options=storage_options),
FlatMap(lambda x: x),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 495
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_csv_reader_from_azure_error_on_file_not_found():
controller = build_flow([
CSVSource(f'az:///{os.getenv("AZURE_BLOB_STORE")}/idontexist.csv', header=True, storage_options=storage_options),
]).run()
try:
controller.await_termination()
assert False
except FileNotFoundError:
pass
async def async_test_write_csv_to_azure(azure_teardown_csv):
controller = build_flow([
AsyncEmitSource(),
CSVTarget(f'az:///{azure_teardown_csv}', columns=['n', 'n*10'], header=True, storage_options=storage_options)
]).run()
for i in range(10):
await controller.emit([i, 10 * i])
await controller.terminate()
await controller.await_termination()
actual = AzureBlobFileSystem(**storage_options).open(azure_teardown_csv).read()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert actual.decode("utf-8") == expected
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_csv_to_azure(azure_teardown_file):
asyncio.run(async_test_write_csv_to_azure(azure_teardown_file))
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_csv_with_dict_to_azure(azure_teardown_file):
file_path = f'az:///{azure_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path, columns=['n', 'n*10'], header=True, storage_options=storage_options)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
actual = AzureBlobFileSystem(**storage_options).open(azure_teardown_file).read()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert actual.decode("utf-8") == expected
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_csv_infer_columns_without_header_to_azure(azure_teardown_file):
file_path = f'az:///{azure_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path, storage_options=storage_options)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
actual = AzureBlobFileSystem(**storage_options).open(azure_teardown_file).read()
expected = "0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert actual.decode("utf-8") == expected
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_csv_from_lists_with_metadata_and_column_pruning_to_azure(azure_teardown_file):
file_path = f'az:///{azure_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path, columns=['event_key=$key', 'n*10'], header=True, storage_options=storage_options)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
actual = AzureBlobFileSystem(**storage_options).open(azure_teardown_file).read()
expected = "event_key,n*10\nkey0,0\nkey1,10\nkey2,20\nkey3,30\nkey4,40\nkey5,50\nkey6,60\nkey7,70\nkey8,80\nkey9,90\n"
assert actual.decode("utf-8") == expected
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_to_parquet_to_azure(azure_setup_teardown_test):
out_dir = f'az:///{azure_setup_teardown_test}'
columns = ['my_int', 'my_string']
controller = build_flow([
SyncEmitSource(),
ParquetTarget(out_dir, partition_cols='my_int', columns=columns, max_events=1, storage_options=storage_options)
]).run()
expected = []
for i in range(10):
controller.emit([i, f'this is {i}'])
expected.append([i, f'this is {i}'])
expected = pd.DataFrame(expected, columns=columns, dtype='int32')
controller.terminate()
controller.await_termination()
    read_back_df = pd.read_parquet(out_dir, columns=columns, storage_options=storage_options)
DESC='''
MSC network properties
'''
import argparse
from itertools import product
import networkx as nx
import numpy as np
from os import environ,getenv
from os.path import basename
import pandas as pd
from pdb import set_trace
import scipy
import sqlite3
import sys
sys.path.append('../../../SPREAD_multipathway_simulator/simulator/scripts')
import msc_network as msc
pd.options.mode.chained_assignment = None
def main():
# parser
parser=argparse.ArgumentParser(description=DESC,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-i", "--network", required = True, help="MSC network")
parser.add_argument("-r", "--range", required = True, type = float, help="range")
parser.add_argument("-o", "--output_prefix", required = True, help="out prefix")
args = parser.parse_args()
# read network using msc
network = msc.MultiScaleNet()
network.read_from_file(args.network)
network.display_summary()
network.nodes[0] = network.nodes[0].set_index('node')
network.edges[0] = network.edges[0][network.edges[0].moore <= args.range]
hierarchy = network.hierarchy[network.hierarchy.parent != -1]
# month
sd_edges = network.edges[0][['source', 'target']]
intra_loc_edges = hierarchy[
['parent', 'child']].groupby('parent').apply(
lambda x: nx.to_pandas_edgelist(
nx.complete_graph(x.child.tolist()))).reset_index()[
['source', 'target']]
with open(f'{args.output_prefix}_{args.range}.csv', 'w') as f:
f.write('network,range,month,spectral_radius,spectral_radius_unweighted,diameter\n')
for m in range(1,13):
# short distance
cells = network.nodes[0][f'{m}']
sd_edges['weight'] = sd_edges.source.map(cells)
# intra-locality
intra_loc_edges['weight'] = intra_loc_edges.source.map(cells)
# inter-locality
Fld = network.edges[1][network.edges[1].month == m]
meta_edge_list = []
for ind, e in Fld.iterrows():
x = hierarchy[hierarchy.parent == e.source].child.to_list()
y = hierarchy[hierarchy.parent == e.target].child.to_list()
meta_edge = pd.DataFrame.from_records(product(x,y)).rename(columns = {
0: 'source', 1:'target'})
meta_edge['weight'] = meta_edge.source.map(cells) * e.weight
meta_edge_list.append(meta_edge)
inter_loc_edges = pd.concat(meta_edge_list)
# combine networks
            edges = pd.concat([sd_edges, intra_loc_edges, inter_loc_edges])
""" dockstring to make pylint happy.
"""
# import
import os
import uuid
import requests
import numpy as np
import pandas as pd
import plotly.express as px
from dotenv import load_dotenv
from IPython.display import display
from geopy.geocoders import Nominatim
## Utils functions
# coordinates of cities
def cities_to_coords(city_name) :
""" get cities coordiniates.
"""
# geocoder
geolocator = Nominatim(user_agent="app")
location = geolocator.geocode(city_name)
# full_address, latitude, longitude
full_address = location.address
latitude = location.latitude
longitude = location.longitude
# return
return pd.Series([full_address, latitude, longitude])
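# Hedged usage sketch (not part of the original workflow; the city name is illustrative):
#   full_address, latitude, longitude = cities_to_coords("Paris")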
# volume of rain from Open Weather
def volume_rain(lat, lon, exclude, appid) :
""" volume of rain in each city over 7 next days.
"""
# open weather one call api
params = (
('lat', lat),
('lon', lon),
('exclude', exclude),
('appid', appid)
)
response = requests.get('https://api.openweathermap.org/data/2.5/onecall', params=params)
    # normalize response
open_weather_7days = pd.json_normalize(response.json()["daily"])
# clean rain column
open_weather_7days["rain"] = open_weather_7days["rain"].apply(lambda x : 0 if np.isnan(x) else x)
    # expected rain volume: daily rain weighted by probability of precipitation, averaged over the forecast days
n_days = open_weather_7days.shape[0]
volume_rain_7days = np.sum(open_weather_7days["rain"] * open_weather_7days["pop"]) / n_days
# return
return round(volume_rain_7days,3)
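# Hedged usage sketch (illustrative coordinates; OPEN_WEATHER_TOKEN is loaded further down):
#   rain_7d = volume_rain(48.85, 2.35, 'current,minutely,hourly,alerts', OPEN_WEATHER_TOKEN)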
# plot function
def plot(df, lat_col, lon_col, color_col, mapbox_token_file, figname, zoom=4) :
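    """ Scatter-map of df points (lat_col, lon_col), colored and sized by color_col, saved to figname.
    """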
# read token file
px.set_mapbox_access_token(open(mapbox_token_file).read())
# fig
fig = px.scatter_mapbox(df, lat=lat_col, lon=lon_col,
color=color_col, size=color_col,
color_continuous_scale=px.colors.cyclical.IceFire,
size_max=15, zoom=zoom)
# save fig
fig.write_image(figname)
# fig.show()
## Workflow
# env variables
load_dotenv()
OPEN_WEATHER_TOKEN = os.getenv('Open_weather_token')
# french top 35 cities
french_top_35 = ["<NAME>", "St Malo", "Bayeux", "Le Havre",
"Rouen", "Paris", "Amiens", "Lille", "Strasbourg",
"<NAME>", "Colmar", "Eguisheim",
"Besancon", "Dijon", "Annecy", "Grenoble", "Lyon",
"<NAME>", "<NAME>", "Cassis",
"Marseille", "Aix en Provence", "Avignon", "Uzes", "Nimes",
"<NAME>", "Saintes Maries de la mer", "Collioure",
"Carcassonne", "Ariege", "Toulouse", "Montauban", "Biarritz",
"Bayonne", "La Rochelle"]
# dataset
weather_data = "../data/temp/weather_data.csv"
overwrite = False
if not os.path.exists(weather_data) or overwrite==True:
# init dataframe
weather_df = pd.DataFrame(french_top_35, columns=["cities"])
display(weather_df.sample(2))
# cities to coords
if not "full_address" in weather_df.columns :
weather_df[["full_address", "latitude", "longitude"]] = weather_df["cities"].apply(cities_to_coords)
display(weather_df.sample(2))
else :
display(weather_df.sample(2))
print("full_address, latitude & longitude columns exists !")
# fill weather_df
exclude = 'current,minutely,hourly,alerts'
appid = OPEN_WEATHER_TOKEN
if not "volume_rain_7days" in weather_df.columns :
weather_df["volume_rain_7days"] = weather_df[["latitude", "longitude"]].transpose().apply(lambda x : volume_rain(x[0], x[1], exclude, appid))
display(weather_df.sample(2))
# unique identifier (uuid)
if not "uuid" in weather_df.columns :
weather_df['uuid'] = weather_df.index.to_series().map(lambda x: uuid.uuid4())
display(weather_df.sample(2))
# reorder columns
keep_col = ['uuid', 'cities', 'full_address', 'latitude', 'longitude', 'volume_rain_7days']
weather_df = weather_df[keep_col]
display(weather_df.sample(2))
# save dataframe
weather_df.to_csv(weather_data)
else :
    weather_df = pd.read_csv(weather_data, index_col=[0])
# Databricks notebook source
# MAGIC %md
# MAGIC # Book 1: Data Ingestion
# MAGIC This notebook executes the **first step of a data science process**, to download the raw data sets and store them as Spark Dataframes accessible to your Azure Databricks instance.
# MAGIC
# MAGIC To examine the SPARK data frames constructed, an optional 1a_raw_data_exploring notebook has been included in the repository and copied to your Azure Databricks Workspace. You must run this data ingestion notebook before running the exploration notebook cells. The exploration notebook details the simulated data sets we used for this predictive maintenance solution example.
# MAGIC
# MAGIC ### Data sources
# MAGIC The common data elements for predictive maintenance problems can be summarized as follows:
# MAGIC
# MAGIC **Machine features:** The features specific to each individual machine, e.g. engine size, make, model, location, installation date.
# MAGIC **Telemetry data:** The operating condition data collected from sensors, e.g. temperature, vibration, operating speeds, pressures.
# MAGIC **Maintenance history:** The repair history of a machine, e.g. maintenance activities or component replacements; this can also include error code or runtime message logs.
# MAGIC **Failure history:** The failure history of a machine or component of interest.
# MAGIC It is possible that failure history is contained within maintenance history, either in the form of special error codes or order dates for spare parts. In those cases, failures can be extracted from the maintenance data. Additionally, different business domains may have a variety of other data sources that influence failure patterns which are not listed here exhaustively. These should be identified by consulting the domain experts when building predictive models.
# MAGIC
# MAGIC Some examples of above data elements from use cases are:
# MAGIC
# MAGIC * Machine conditions and usage: Flight routes and times, sensor data collected from aircraft engines, sensor readings from ATM transactions, train events data, sensor readings from wind turbines, elevators and connected cars.
# MAGIC
# MAGIC * Machine features: Circuit breaker technical specifications such as voltage levels, geolocation or car features such as make, model, engine size, tire types, production facility etc.
# MAGIC
# MAGIC * Failure history: flight delay dates, aircraft component failure dates and types, ATM cash withdrawal transaction failures, train/elevator door failures, brake disk replacement order dates, wind turbine failure dates and circuit breaker command failures.
# MAGIC
# MAGIC * Maintenance history: Flight error logs, ATM transaction error logs, train maintenance records including maintenance type, short description etc. and circuit breaker maintenance records.
# MAGIC
# MAGIC Given the above data sources, the two main data types we observe in the predictive maintenance domain are temporal data and static data. Failure history, machine conditions, repair history, and usage history are time series indicated by the timestamp of data collection. Machine and operator specific features are more static, since they usually describe the technical specifications of machines or operators' properties.
# MAGIC
# MAGIC For this scenario, we use relatively large-scale data to walk you through the main steps: data ingestion (this notebook), feature engineering, model building, and model operationalization and deployment. The code for the entire process is written in PySpark and implemented using Jupyter notebooks within Azure Databricks. We use Azure Databricks scheduled notebooks to simulate creating failure predictions in batch scenarios.
# MAGIC
# MAGIC ### Step 1: Data Ingestion
# MAGIC This data acquisition notebook will download the simulated predictive maintenance data sets **from our GitHub data store.** We do some preliminary data cleaning and store the results as Spark data frames on the Azure cluster for use in the remaining notebook steps of this analysis.
# COMMAND ----------
## Setup our environment by importing required libraries
# Github has been having some timeout issues. This should fix the problem for this dataset.
import socket
socket.setdefaulttimeout(90)
import glob
import os
# Read csv file from URL directly
import pandas as pd
import urllib
from datetime import datetime
# Setup the pyspark environment
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Download simulated data sets
# MAGIC We will be reusing the raw simulated data files from another tutorial. **The notebook automatically downloads these files stored at Microsoft/SQL-Server-R-Services-Samples GitHub site.**
# MAGIC
# MAGIC The five data files are:
# MAGIC - machines.csv
# MAGIC - maint.csv
# MAGIC - errors.csv
# MAGIC - telemetry.csv
# MAGIC - failures.csv
# MAGIC
# MAGIC There are 1000 machines of four different models. Each machine contains four components of interest, and four sensors measuring voltage, pressure, vibration and rotation. A controller monitors the system and raises alerts for five different error conditions. Maintenance logs indicate when something is done to the machine which does not include a component replacement. A failure is defined by the replacement of a component.
# MAGIC 
# MAGIC
# MAGIC This notebook does some preliminary data cleanup, creates summary graphics for each data set to verify the data downloaded correctly, and stores the resulting data sets in DBFS.
# COMMAND ----------
# The raw data is stored on GitHub here:
# https://github.com/Microsoft/SQL-Server-R-Services-Samples/tree/master/PredictiveMaintenanceModelingGuide/Data
# We access it through this URL:
basedataurl = "https://media.githubusercontent.com/media/Microsoft/SQL-Server-R-Services-Samples/master/PredictiveMaintenanceModelingGuide/Data/"
# We will store each of these data sets in DBFS.
# These file names detail which blob each file is stored under.
MACH_DATA = 'machines_data'
MAINT_DATA = 'maint_data'
ERROR_DATA = 'errors_data'
TELEMETRY_DATA = 'telemetry_data'
FAILURE_DATA = 'failure_data'
# COMMAND ----------
# MAGIC %md
# MAGIC #### Machines data set
# MAGIC This simulation tracks a simulated set of 1000 machines over the course of a single year (2015).
# MAGIC
# MAGIC This data set includes information about each machine: Machine ID, model type and age (years in service).
# COMMAND ----------
# load raw data from the GitHub URL
datafile = "machines.csv"
# Download the file once, and only once.
if not os.path.isfile(datafile):
urllib.request.urlretrieve(basedataurl+datafile, datafile)
# Read into pandas
machines = pd.read_csv(datafile)
# The data was read in using a Pandas data frame. We'll convert
# it to pyspark to ensure it is in a Spark usable form for later
# manipulations.
mach_spark = spark.createDataFrame(machines,
verifySchema=False)
# Write the Machine data set to intermediate storage
mach_spark.write.mode('overwrite').saveAsTable(MACH_DATA)
# COMMAND ----------
# MAGIC %md
# MAGIC #### Errors data set
# MAGIC The error log contains non-breaking errors recorded while the machine is still operational. These errors are not considered failures, though they may be predictive of a future failure event. The error datetime field is rounded to the closest hour since the telemetry data (loaded later) is collected at an hourly rate.
# COMMAND ----------
# load raw data from the GitHub URL
datafile = "errors.csv"
# Download the file once, and only once.
if not os.path.isfile(datafile):
urllib.request.urlretrieve(basedataurl+datafile, datafile)
# Read into pandas
errors = pd.read_csv(datafile, encoding='utf-8')
# The data was read in using a Pandas data frame. We'll convert
# it to pyspark to ensure it is in a Spark usable form for later
# manipulations.
error_spark = spark.createDataFrame(errors,
verifySchema=False)
# Write the Errors data set to intermediate storage
error_spark.write.mode('overwrite').saveAsTable(ERROR_DATA)
# COMMAND ----------
# MAGIC %md
# MAGIC #### Maintenance data set
# MAGIC The maintenance log contains both scheduled and unscheduled maintenance records. Scheduled maintenance corresponds with regular inspection of components, unscheduled maintenance may arise from mechanical failure or other performance degradations. A failure record is generated for component replacement in the case of either maintenance events. Because maintenance events can also be used to infer component life, the maintenance data has been collected over two years (2014, 2015) instead of only over the year of interest (2015).
# COMMAND ----------
# load raw data from the GitHub URL
datafile = "maint.csv"
# Download the file once, and only once.
if not os.path.isfile(datafile):
urllib.request.urlretrieve(basedataurl+datafile, datafile)
# Read into pandas
maint = pd.read_csv(datafile, encoding='utf-8')
# The data was read in using a Pandas data frame. We'll convert
# it to pyspark to ensure it is in a Spark usable form for later
# manipulations.
maint_spark = spark.createDataFrame(maint,
verifySchema=False)
# Write the Maintenance data set to intermediate storage
maint_spark.write.mode('overwrite').saveAsTable(MAINT_DATA)
# COMMAND ----------
# MAGIC %md
# MAGIC #### Telemetry data set
# MAGIC The telemetry time-series data consists of voltage, rotation, pressure, and vibration sensor measurements collected from each machine in real time. The data is averaged over an hour and stored in the telemetry logs.
# COMMAND ----------
# load raw data from the GitHub URL
datafile = "telemetry.csv"
# Download the file once, and only once.
if not os.path.isfile(datafile):
urllib.request.urlretrieve(basedataurl+datafile, datafile)
# Read into pandas
telemetry = pd.read_csv(datafile, encoding='utf-8')
# handle missing values
# define groups of features
features_datetime = ['datetime']
features_categorical = ['machineID']
features_numeric = list(set(telemetry.columns) - set(features_datetime) - set(features_categorical))
# Replace numeric NA with 0
telemetry[features_numeric] = telemetry[features_numeric].fillna(0)
# Replace categorical NA with 'Unknown'
telemetry[features_categorical] = telemetry[features_categorical].fillna("Unknown")
# The data was read in using a Pandas data frame. We'll convert
# it to pyspark to ensure it is in a Spark usable form for later
# manipulations.
# This line takes about 9.5 minutes to run.
telemetry_spark = spark.createDataFrame(telemetry, verifySchema=False)
# Write the telemetry data set to intermediate storage
telemetry_spark.write.mode('overwrite').saveAsTable(TELEMETRY_DATA)
# COMMAND ----------
# MAGIC %md
# MAGIC #### Failures data set
# MAGIC Failures correspond to component replacements within the maintenance log. Each record contains the Machine ID, component type, and replacement datetime. These records will be used to create the machine learning labels we will be trying to predict.
# COMMAND ----------
# load raw data from the GitHub URL
datafile = "failures.csv"
# Download the file once, and only once.
if not os.path.isfile(datafile):
urllib.request.urlretrieve(basedataurl+datafile, datafile)
# Read into pandas
failures = pd.read_csv(datafile, encoding='utf-8')
# The data was read in using a Pandas data frame. We'll convert
# it to pyspark to ensure it is in a Spark usable form for later
# manipulations.
failures_spark = spark.createDataFrame(failures,
verifySchema=False)
# Write the failures data set to intermediate storage
failures_spark.write.mode('overwrite').saveAsTable(FAILURE_DATA)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Conclusion
# MAGIC We have now stored the Raw data required for this Predictive Maintenance scenario as Spark data frames in the Azure Databricks instance. You can examine them in the Data panel accessible on the left. You should see the following five data sources:
# MAGIC
# MAGIC 1. error_files
# MAGIC 2. machine_files
# MAGIC 3. maint_files
# MAGIC 4. telemetry_files
# MAGIC 5. failure_files
# MAGIC
# MAGIC The .\notebooks\1a_raw data exploration notebook does a preliminary data exploration on these data sets to help understand what we are working with. These data sets will be used in the next step (the .\notebooks\2a_feature_engineering notebook) to generate the analysis data sets containing model features for our predictive maintenance machine learning model.
# COMMAND ----------
# MAGIC %md
# MAGIC # Book 1A: Raw data exploration
# MAGIC This notebook can be run after executing the 1_data_ingestion notebook. This notebook examines the SPARK data frames constructed in the previous notebook. Much of the text from the 1_data_ingestion notebook has been repeated here for convenience.
# MAGIC
# MAGIC ### Data source
# MAGIC The common data elements for predictive maintenance problems can be summarized as follows:
# MAGIC
# MAGIC * Machine features: The features specific to each individual machine, e.g. engine size, make, model, location, installation date.
# MAGIC * Telemetry data: The operating condition data collected from sensors, e.g. temperature, vibration, operating speeds, pressures.
# MAGIC * Maintenance history: The repair history of a machine, e.g. maintenance activities or component replacements, this can also include error code or runtime message logs.
# MAGIC * Failure history: The failure history of a machine or component of interest.
# MAGIC It is possible that failure history is contained within maintenance history, either in the form of special error codes or order dates for spare parts. In those cases, failures can be extracted from the maintenance data. Additionally, different business domains may have a variety of other data sources that influence failure patterns which are not listed here exhaustively. These should be identified by consulting the domain experts when building predictive models.
# MAGIC
# MAGIC Some examples of above data elements from use cases are:
# MAGIC
# MAGIC **Machine conditions and usage:** Flight routes and times, sensor data collected from aircraft engines, sensor readings from ATM transactions, train events data, sensor readings from wind turbines, elevators and connected cars.
# MAGIC
# MAGIC **Machine features:** Circuit breaker technical specifications such as voltage levels, geolocation or car features such as make, model, engine size, tire types, production facility etc.
# MAGIC
# MAGIC **Failure history:** flight delay dates, aircraft component failure dates and types, ATM cash withdrawal transaction failures, train/elevator door failures, brake disk replacement order dates, wind turbine failure dates and circuit breaker command failures.
# MAGIC
# MAGIC **Maintenance history:** Flight error logs, ATM transaction error logs, train maintenance records including maintenance type, short description etc. and circuit breaker maintenance records.
# MAGIC
# MAGIC Given the above data sources, the two main data types we observe in the predictive maintenance domain are temporal data and static data. Failure history, machine conditions, repair history, and usage history are time series indicated by the timestamp of data collection. Machine and operator specific features are more static, since they usually describe the technical specifications of machines or operators' properties.
# MAGIC
# MAGIC For this scenario, we use relatively large-scale data to walk the user through the main steps from data ingestion, feature engineering, model building, and model operationalization and deployment. The code for the entire process is written in PySpark and implemented using Jupyter notebooks.
# MAGIC
# MAGIC ## Step 1A: Data exploration.
# MAGIC We do some preliminary data cleaning and verification, and generate some data exploration figures to help understand the data we will be working with in the remainder of this scenario.
# MAGIC
# MAGIC **Note:** This notebook will take about 2-3 minutes to execute all cells, depending on the compute configuration you have setup.
# COMMAND ----------
## Setup our environment by importing required libraries
# For creating some preliminary EDA plots.
# %matplotlib inline
import matplotlib.pyplot as plt
# Read csv file from URL directly
import pandas as pd
from ggplot import *
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
# These file names detail which blob each file is stored under.
MACH_DATA = 'machines_data'
MAINT_DATA = 'maint_data'
ERROR_DATA = 'errors_data'
TELEMETRY_DATA = 'telemetry_data'
FAILURE_DATA = 'failure_data'
# COMMAND ----------
# MAGIC %md
# MAGIC #### Load simulated data sets
# MAGIC We downloaded the simulated data files in the .\notebooks\1_data_ingestion notebook and stored the data as SPARK data frames in the five data sets:
# MAGIC * machines_files
# MAGIC * maint_files
# MAGIC * errors_files
# MAGIC * telemetry_files
# MAGIC * failures_files
# MAGIC
# MAGIC There are 1000 machines of four different models. Each machine contains four components of interest, and four sensors measuring voltage, pressure, vibration and rotation. A controller monitors the system and raises alerts for five different error conditions. Maintenance logs indicate when something is done to the machine which does not include a component replacement. A failure is defined by the replacement of a component.
# MAGIC 
# MAGIC
# MAGIC This notebook does some preliminary data cleanup and creates summary graphics for each data set to verify the data downloaded correctly
# MAGIC ##### Machines data set
# MAGIC This simulation tracks a simulated set of 1000 machines over the course of a single year (2015).
# MAGIC
# MAGIC This data set includes information about each machine: Machine ID, model type and age (years in service).
# MAGIC
# MAGIC The following figure plots a histogram of machine age, colored by model.
# COMMAND ----------
mach = spark.table(MACH_DATA) # spark.sql("SELECT * FROM " + MACH_DATA)
machines = mach.toPandas()
# plot a histogram of machine age, colored by model type
plt.figure(figsize=(8, 6))
fig, ax = plt.subplots()
_, bins, _ = plt.hist([machines.loc[machines['model'] == 'model1', 'age'],
machines.loc[machines['model'] == 'model2', 'age'],
machines.loc[machines['model'] == 'model3', 'age'],
machines.loc[machines['model'] == 'model4', 'age']],
20, stacked=True,
label=['model1', 'model2', 'model3', 'model4'])
plt.xlabel('Age (yrs)')
plt.ylabel('Count')
plt.legend()
display(fig)
# COMMAND ----------
display(mach)
# COMMAND ----------
# MAGIC %md
# MAGIC The figure shows how long the collection of machines has been in service. It indicates there are four model types, shown in different colors, and all four models have been in service over the entire 20 years. The machine age will be a feature in our analysis, since we expect older machines may have a different set of errors and failures than machines that have not been in service long.
# COMMAND ----------
# MAGIC %md
# MAGIC #### Errors data set
# MAGIC The error log contains non-breaking errors recorded while the machine is still operational. These errors are not considered failures, though they may be predictive of a future failure event. The error datetime field is rounded to the closest hour since the telemetry data (loaded later) is collected at an hourly rate.
# MAGIC The following histogram details the distribution of the errors tracked in the log files.
# COMMAND ----------
errors = spark.table(ERROR_DATA)
# COMMAND ----------
#Quick plot to show structure.
display(errors)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Maintenance data set
# MAGIC The maintenance log contains both scheduled and unscheduled maintenance records. Scheduled maintenance corresponds with regular inspection of components, unscheduled maintenance may arise from mechanical failure or other performance degradations. A failure record is generated for component replacement in the case of either maintenance events. Because maintenance events can also be used to infer component life, the maintenance data has been collected over two years (2014, 2015) instead of only over the year of interest (2015).
# COMMAND ----------
maint = spark.table(MAINT_DATA)
# COMMAND ----------
# Quick plot to show structure
display(maint)
# COMMAND ----------
# MAGIC %md
# MAGIC The figure shows a histogram of component replacements divided into the four component types over the entire maintenance history. It looks like these four components are replaced at similar rates.
# MAGIC
# MAGIC There are many ways we might want to look at this data including calculating how long each component type lasts, or the time history of component replacements within each machine. This will take some preprocess of the data, which we are delaying until we do the feature engineering steps in the next example notebook.
# MAGIC
# MAGIC Next, we convert the errors data to a Spark dataframe, and verify the data types have converted correctly.
# COMMAND ----------
# MAGIC %md
# MAGIC #### Telemetry data set
# MAGIC The telemetry time-series data consists of voltage, rotation, pressure, and vibration sensor measurements collected from each machine in real time. The data is averaged over an hour and stored in the telemetry logs.
# MAGIC
# MAGIC Rather than plot 8.7 million data points, this figure plots a month of measurements for a single machine. This is representative of each feature repeated for every machine over the entire year of sensor data collection.
# COMMAND ----------
telemetry = spark.table(TELEMETRY_DATA).toPandas()
plt_data = telemetry.loc[telemetry['machineID'] == 1]
# format datetime field which comes in as string
plt_data['datetime'] = pd.to_datetime(plt_data['datetime'], format="%Y-%m-%d %H:%M:%S")
# Quick plot to show structure
plot_df = plt_data.loc[(plt_data['datetime'] >= pd.to_datetime('2015-02-01'))]
#!/usr/bin/env python
# Author: <NAME> (jsh) [<EMAIL>]
import itertools
import joblib
import logging
import os.path
import pathlib
import random
import shutil
import sys
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from sklearn import preprocessing as skpreproc
from keras.layers import Dense
from keras.models import Sequential
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
np.set_printoptions(precision=4, suppress=True)
_CODEDIR = pathlib.Path(__file__).parent
MODELDIR = _CODEDIR / 'model'
MODELFILE = MODELDIR / 'model.d5'.format(**locals())
XS_FILE = MODELDIR / 'xscaler.dump'
YS_FILE = MODELDIR / 'yscaler.dump'
_EPOCHS = 30
_BATCH_SIZE = 32
def _build_linear_model(num_features):
model = Sequential()
model.add(Dense(1, input_dim=num_features, activation='linear'))
model.compile(loss='mse', metrics=['mse'], optimizer='adam')
return model
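# Hedged usage sketch (X and y stand for already-expanded and scaled feature/target
# arrays; the names are illustrative, not part of this module):
#   model = _build_linear_model(num_features=X.shape[1])
#   model.fit(X, y, epochs=_EPOCHS, batch_size=_BATCH_SIZE)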
def _expand_dummies(frame):
categories = dict()
bases = ['A', 'C', 'G', 'T']
idxs = [x for x in range(20)] # Magic number because guidelen is fixed.
pairs = [''.join(pair) for pair in itertools.product(bases, bases)]
categories['mm_idx'] = idxs
categories['mm_trans'] = pairs
widecols = list()
for column in frame.columns:
if column not in categories:
continue
    frame[column] = frame[column].astype(CategoricalDtype(categories[column]))
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(
|
StringIO(self.data1)
|
pandas.compat.StringIO
|
# Copyright (c) 2018-2019, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
from utils import assert_eq
import nvstrings
def compare_split_records(nvstrs, pstrs):
for i in range(len(nvstrs)):
got = nvstrs[i]
expected = pstrs[i]
if not got:
if expected is None:
continue
assert got.to_host() == expected
def test_split_record():
s = ["héllo", None, "a_bc_déf", "a__bc", "_ab_cd", "ab_cd_", "", " a b "]
strs = nvstrings.to_device(s)
    pstrs = pd.Series(s)
# encoding=utf-8
'''
lb 0.2190 2 folds
'''
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import KFold
from sklearn.linear_model import Ridge
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import lightgbm as lgb
import matplotlib.pyplot as plt
import gc, re
from sklearn.utils import shuffle
from contextlib import contextmanager
from sklearn.externals import joblib
import time
start_time=time.time()
print("Starting job at time:", time.time())
debug = False
print("loading data ...")
used_cols = ["item_id", "user_id"]
if debug == False:
train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", parse_dates=["activation_date"])
    # add the translated title text
train_df['title_ru'] = pd.read_csv('../input/train_ru_title.csv')
test_df['title_ru'] = pd.read_csv('../input/test_ru_title.csv')
# suppl
train_active = pd.read_csv("../input/train_active.csv", usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", parse_dates=["date_from", "date_to"])
else:
train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
train_df = shuffle(train_df, random_state=1234);
    # add the translated title text
train_df['title_ru'] = pd.read_csv('../input/train_ru_title.csv')
train_df = train_df.iloc[:10000*5]
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", nrows=1000, parse_dates=["activation_date"])
test_df['title_ru'] = pd.read_csv('../input/test_ru_title.csv', nrows=1000)
# suppl
train_active = pd.read_csv("../input/train_active.csv", nrows=1000, usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", nrows=1000, usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", nrows=1000, parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", nrows=1000, parse_dates=["date_from", "date_to"])
print("loading data done!")
# =============================================================================
# Add image quality: by steeve
# =============================================================================
import pickle
from nltk.tokenize import wordpunct_tokenize
with open('../input/inception_v3_include_head_max_train.p', 'rb') as f:
x = pickle.load(f)
train_features = x['features']
train_ids = x['ids']
with open('../input/inception_v3_include_head_max_test.p', 'rb') as f:
x = pickle.load(f)
test_features = x['features']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_features, columns=['image_quality'],dtype=np.float32)
incep_test_image_df = pd.DataFrame(test_features, columns=['image_quality'],dtype=np.float32)
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
del train_features,test_features
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
with open('../input/train_image_features.p', 'rb') as f:
x = pickle.load(f)
train_blurinesses = x['blurinesses']
train_ids = x['ids']
with open('../input/test_image_features.p', 'rb') as f:
x = pickle.load(f)
test_blurinesses = x['blurinesses']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_blurinesses, columns=['blurinesses'],dtype=np.float32)
incep_test_image_df = pd.DataFrame(test_blurinesses, columns=['blurinesses'],dtype=np.float32)
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
del train_blurinesses,test_blurinesses
print('adding whitenesses ...')
with open('../input/train_image_features.p', 'rb') as f:
x = pickle.load(f)
train_whitenesses = x['whitenesses']
train_ids = x['ids']
with open('../input/test_image_features.p', 'rb') as f:
x = pickle.load(f)
test_whitenesses = x['whitenesses']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_whitenesses, columns=['whitenesses'],dtype=np.float32)
incep_test_image_df = pd.DataFrame(test_whitenesses, columns=['whitenesses'],dtype=np.float32)
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
del train_whitenesses,test_whitenesses
print('adding dullnesses ...')
with open('../input/train_image_features.p', 'rb') as f:
x = pickle.load(f)
train_dullnesses = x['dullnesses']
train_ids = x['ids']
with open('../input/test_image_features.p', 'rb') as f:
x = pickle.load(f)
test_dullnesses = x['dullnesses']
test_ids = x['ids']
del x;
gc.collect()
incep_train_image_df = pd.DataFrame(train_dullnesses, columns=['dullnesses'],dtype=np.float32)
incep_test_image_df = pd.DataFrame(test_dullnesses, columns=['dullnesses'],dtype=np.float32)
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
del train_dullnesses,test_dullnesses
#new image features---average_reds
with open('../input/train_image_features_1.p', 'rb') as f:
x = pickle.load(f)
train_avg_rgb = x['average_reds']
train_ids = x['ids']
with open('../input/test_image_features_1.p', 'rb') as f:
x = pickle.load(f)
test_avg_rgb = x['average_reds']
test_ids = x['ids']
del x;
gc.collect()
print("load average_reds...")
incep_train_image_df = pd.DataFrame(train_avg_rgb, columns=['average_reds'], dtype=np.float32)
|
#!/usr/bin/env python
# coding: utf-8
# # Antivirals QSAR using DeepChem for Coronaviruses
# From the ChEMBL database we extracted all compounds with reported interaction to CHEMBL5118 and CHEMBL3927. These references correspond to replicase polyprotein 1ab and SARS coronavirus 3C-like proteinase, respectively. These are the only coronavirus-related targets in the ChEMBL database.
#
# We use drug SMILES strings as inputs and transform them into the graph format required by the Graph Convolutional Network classifier (antiviral vs. non-antiviral) from the DeepChem package. Instead of designing a featurization ourselves, the method learns one automatically from the data, similar to how Convolutional Neural Networks learn features from images.
#
#
# The calculations used Google Colab GPU.
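# A minimal, commented sketch (assumed DeepChem 2.3 API; not executed here) of the
# featurization step described above:
#
#     from rdkit import Chem
#     import deepchem as dc
#     featurizer = dc.feat.ConvMolFeaturizer()
#     mols = [Chem.MolFromSmiles(s) for s in ["CCO", "c1ccccc1O"]]
#     graphs = featurizer.featurize(mols)  # list of ConvMol objects (per-atom features + adjacency)
#
# Each ConvMol is what the graph convolution layers consume instead of a hand-crafted descriptor vector.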
# ### Installation of DeepChem with GPU on Google Colab
# In[3]:
get_ipython().run_line_magic('tensorflow_version', '1.x')
get_ipython().system('wget -c https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh')
get_ipython().system('chmod +x Miniconda3-latest-Linux-x86_64.sh')
get_ipython().system('bash ./Miniconda3-latest-Linux-x86_64.sh -b -f -p /usr/local')
get_ipython().system('conda install -y -c deepchem -c rdkit -c conda-forge -c omnia deepchem-gpu=2.3.0')
import sys
sys.path.append('/usr/local/lib/python3.7/site-packages/')
# ### Set parameters, file names, path
#
# Set the seed for numpy and tensorflow:
# In[ ]:
import numpy as np
np.random.seed(42)
import tensorflow as tf
tf.set_random_seed(42)
import deepchem as dc
# Mount the folder with all files from Google Drive:
# In[ ]:
from google.colab import drive
drive.mount('/content/drive')
# In[ ]:
import os
import pandas as pd
# define the path to the dataset for training
input_data=os.path.join("/content/drive/My Drive/MyProjects/DeepChem/antivirals/datasets",'antivirals_SMILES.csv')
# define the path to new prediction data
pred_data=os.path.join("/content/drive/My Drive/MyProjects/DeepChem/antivirals/datasets",'DB_SMILES4prediction.csv')
# ### Transform SMILES into Convolutional Graph format
# In[8]:
# define output name (Class)
tasks=['Class']
# define features
featurizer=dc.feat.ConvMolFeaturizer()
# load data and calculate the features for dataset
loader = dc.data.CSVLoader(tasks=tasks, smiles_field="smiles",featurizer=featurizer)
dataset=loader.featurize(input_data, data_dir='/content/drive/My Drive/MyProjects/DeepChem/antivirals/features_antivirals/')
# calculate the same features for new data to predict
loader2 = dc.data.CSVLoader(tasks=tasks, smiles_field="smiles",featurizer=featurizer)
dataset_pred=loader2.featurize(pred_data, data_dir='/content/drive/My Drive/MyProjects/DeepChem/antivirals/features_DBpredictions/')
# In[9]:
print('Full dataset samples : {}'.format(dataset.X.shape[0]))
print('External dataset samples : {}'.format(dataset_pred.X.shape[0]))
# In[10]:
# define a normalization transformer (note: it is fit on the full dataset here, though fitting on the training subset only is generally preferable)
transformers = [
dc.trans.NormalizationTransformer(transform_y=True, dataset=dataset)]
# apply transformation to all datasets including the external
for transformer in transformers:
dataset = transformer.transform(dataset)
for transformer in transformers:
dataset_pred = transformer.transform(dataset_pred)
# Define the metrics for training as AUROC:
# In[ ]:
# define mean AUROC metrics for the classifier
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
# define number of internal features
n_feat = 75
# define batch size during the training
batch_size = 32
# dropout
n_dropout = 0.05
# ### Test with one split and different epochs
# In[65]:
from deepchem.models import GraphConvModel
# define a splitter
splitter = dc.splits.SingletaskStratifiedSplitter() #ScaffoldSplitter
# split dataset into train, test subsets (80% - 20%)
train_dataset, test_dataset= splitter.train_test_split(dataset,
seed=80,
frac_train=0.8,
verbose=False)
print('Full dataset samples : {}'.format(dataset.X.shape[0]))
print('Train dataset samples : {}'.format(train_dataset.X.shape[0]))
print('Test dataset samples : {}'.format(test_dataset.X.shape[0]))
model = GraphConvModel(
len(tasks), batch_size=batch_size, mode='classification',
dropout=n_dropout,
# model_dir='/content/drive/My Drive/MyProjects/DeepChem/antivirals/models/oneSplitMoreEpochs',
random_seed=42) # same seed here!
# check the error for optimal number of epochs
num_epochs = 200
losses = []
auroc_train = []
auroc_test = []
for i in range(num_epochs):
loss = model.fit(train_dataset, nb_epoch=1, deterministic=True)
print("Epoch %d loss: %f" % (i, loss))
losses.append(loss)
# print statistics
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
print("Training ROC-AUC Score: %f" % train_scores["mean-roc_auc_score"])
test_scores = model.evaluate(test_dataset, [metric], transformers)
print("Test ROC-AUC Score: %f" % test_scores["mean-roc_auc_score"])
auroc_train.append(train_scores["mean-roc_auc_score"])
auroc_test.append(test_scores["mean-roc_auc_score"])
# In[66]:
# plot the errors
import matplotlib.pyplot as plot
plot.ylabel("Loss")
plot.xlabel("Epoch")
x = range(num_epochs)
y = losses
plot.scatter(x, y)
plot.show()
# In[67]:
# plot the auroc train
import matplotlib.pyplot as plot
plot.ylabel("AUROC train")
plot.xlabel("Epoch")
x = range(num_epochs)
y = auroc_train
plot.scatter(x, y)
plot.show()
# In[68]:
# plot the auroc test
import matplotlib.pyplot as plot
plot.ylabel("AUROC test")
plot.xlabel("Epoch")
x = range(num_epochs)
y = auroc_test
plot.scatter(x, y)
plot.show()
# In[70]:
np.max(auroc_test)
# In[71]:
np.mean(auroc_test)
# ### Classification for 10 random stratified splits
# In[ ]:
# define batch size during the training
batch_size = 32
# dropout
n_dropout = 0.05
# number of epochs
n_epoch = 70
# In[74]:
from deepchem.models import GraphConvModel
scores_train=[]
scores_test =[]
# for each seed for external split TRAIN-TEST
for seed_ext in [10,20,30,40,50,60,70,80,90,100]:
print("*** External split")
print("> ext seed =", seed_ext)
# define a splitter
splitter = dc.splits.SingletaskStratifiedSplitter()
# split dataset into train, test subsets (80% - 20%)
train_dataset, test_dataset= splitter.train_test_split(dataset,
seed=seed_ext,
frac_train=0.8,
verbose=False)
print('Full dataset samples : {}'.format(dataset.X.shape[0]))
print('Train dataset samples : {}'.format(train_dataset.X.shape[0]))
print('Test dataset samples : {}'.format(test_dataset.X.shape[0]))
model = GraphConvModel(
len(tasks), batch_size=batch_size, mode='classification',
dropout=n_dropout,
random_seed=42) # same seed here!
# Fit model using train_data
model.fit(train_dataset, nb_epoch=n_epoch, deterministic=True) # 5 for testing
# evaluating the model for train-test
train_scores = model.evaluate(train_dataset, [metric], transformers)
scores_train.append(train_scores["mean-roc_auc_score"])
# evaluating test scores
test_scores = model.evaluate(test_dataset, [metric], transformers)
scores_test.append(test_scores["mean-roc_auc_score"])
# In[75]:
scores_train
# In[76]:
scores_test
# In[77]:
np.mean(scores_train), np.mean(scores_test)
# Test scores:
# In[78]:
import matplotlib.pyplot as plt
fig1, ax1 = plt.subplots()
ax1.set_title('Basic Plot')
ax1.boxplot(scores_test)
# ### Get the final model to make prediction
# In[ ]:
# define batch size during the training
batch_size = 32
# dropout
n_dropout = 0.05
# number of epochs
n_epoch = 70
# In[53]:
# define a splitter
splitter = dc.splits.SingletaskStratifiedSplitter() #ScaffoldSplitter
# split dataset into train, test subsets (80% - 20%)
train_dataset, test_dataset= splitter.train_test_split(dataset,
seed=80,
frac_train=0.8,
verbose=False)
print('Full dataset samples : {}'.format(dataset.X.shape[0]))
print('Train dataset samples : {}'.format(train_dataset.X.shape[0]))
print('Test dataset samples : {}'.format(test_dataset.X.shape[0]))
# In[54]:
# define the model
from deepchem.models import GraphConvModel
model = GraphConvModel(
len(tasks), batch_size=batch_size, mode='classification',
dropout=n_dropout,
model_dir='/content/drive/My Drive/MyProjects/DeepChem/antivirals/models_antivirals/', # output folder for models!
random_seed=42) # same seed here!
# In[55]:
# Fit trained model
model.fit(train_dataset, nb_epoch=n_epoch, deterministic=True)
# In[56]:
# print statistics
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
print("Training ROC-AUC Score: %f" % train_scores["mean-roc_auc_score"])
test_scores = model.evaluate(test_dataset, [metric], transformers)
print("Test ROC-AUC Score: %f" % test_scores["mean-roc_auc_score"])
# In[22]:
test_scores
# ### Make prediction with this model
# In[ ]:
predictions = model.predict(dataset_pred)
# In[58]:
import pandas as pd
df = pd.read_csv(pred_data)
df
# In[59]:
# create a dataframe with the predictions
df_preds = pd.DataFrame (columns = ['smiles','ProbClass1'])
df_preds['smiles'] = list(dataset_pred.ids)
df_preds['ProbClass1'] = list(predictions[:,0,1]) # second column = % class 1
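# Note (assumption about the output layout): in classification mode GraphConvModel.predict
# returns an array of shape (n_samples, n_tasks, n_classes), so [:, 0, 1] selects the
# probability of class 1 for the single 'Class' task.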
df_preds
# In[60]:
merged_inner = pd.merge(left=df, right=df_preds, left_on='smiles', right_on='smiles')
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
from rdkit import Chem
import numpy as np
from collections import Counter, defaultdict
from sklearn.metrics import roc_auc_score, mean_squared_error
import argparse
import re
from utils import read_contrib_spci
def calc_auc(merged_df,
which_lbls=("positive", "negative"),
contrib_col_name="contrib",
lbl_col_name="lbl"):
    # AUC is calculated only for molecules that have labelled atoms; otherwise -1 is returned
def auc_wrapper(y_true, y_score, inverse=False):
if len(set(y_true)) > 1:
if not inverse: return roc_auc_score(y_true, y_score)
else: return 1 - roc_auc_score(y_true, y_score)
else:
return -1
res = {}
if "positive" in which_lbls:
res["auc_pos"] = merged_df.groupby(by="molecule").apply(lambda gr: auc_wrapper(y_true=gr[lbl_col_name]>0,
y_score=gr[contrib_col_name]))
if "negative" in which_lbls:
res["auc_neg"] = merged_df.groupby(by="molecule").apply(lambda gr: auc_wrapper(y_true=gr[lbl_col_name]<0,
y_score=gr[contrib_col_name],inverse=True))
return res
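# Hedged usage sketch for calc_auc (toy values; column names follow the defaults above):
#
#     toy = pd.DataFrame({"molecule": ["m1"] * 3 + ["m2"] * 3,
#                         "contrib":  [0.9, 0.1, 0.5, -0.2, 0.3, 0.0],
#                         "lbl":      [1, 0, 0, -1, 0, 0]})
#     calc_auc(toy)  # {"auc_pos": per-molecule Series, "auc_neg": per-molecule Series}
#
# Molecules whose labels are all of one class get -1 instead of an AUC.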
def merge_lbls_contribs(contribs, lbls, lbl_col_name="lbl"):
merged_df = pd.merge(
contribs, lbls,
how="inner")
    # A left join here could lead to an incorrect RMSE if setdiff(mols_from_contribs, mols_from_sdf) > 0:
    # if some atoms of a given molecule are not in the SDF, we must not use their contributions,
    # since they may be non-existent atoms. With the inner join used above, only atoms that have
    # labels (i.e. not NA) are kept.
    # how="left")  # left join: all atoms with contributions would be used
# merged_df.loc[
# pd.isna(merged_df[lbl_col_name]),
# lbl_col_name] = 0 # set zero lbl to atoms missing in ids table
return merged_df
def read_contrib(contrib, sep=","):
    contrib = pd.read_csv(contrib, sep=sep)
# Example training commands:
# python train.py --config=mlp_mfcc --batch_size=32 --checkpoint_dir=./checkpoints/mlp_baseline_tst
# python train.py --config=resnet_base --batch_size=32 --checkpoint_dir=./checkpoints/resnet_tst
# python train.py --config=resnet_with_augmentation --batch_size=32 --checkpoint_dir=./checkpoints/resnet_aug_tst
# python train.py --config=resnet_with_augmentation --batch_size=32 --checkpoint_dir=./checkpoints/resnet_aug_audioset_tst --train_on_noisy_audioset=True
import load_data
from functools import partial
import time
import configs
import models
from sklearn.utils import shuffle
from tensorboardX import SummaryWriter
from torch import optim, nn
import os
import sys
import pickle
import time
import librosa
import argparse
import torch
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from tqdm import tqdm
import warnings
# Lhotse imports
from torch.utils.data import DataLoader
from lhotse import CutSet
from lhotse.dataset import VadDataset, SingleCutSampler
sys.path.append('./utils/')
import torch_utils
import dataset_utils
import audio_utils
import data_loaders
warnings.filterwarnings('ignore', category=UserWarning)
learning_rate = 0.01 # Learning rate.
decay_rate = 0.9999 # Learning rate decay per minibatch.
min_learning_rate = 0.000001 # Minimum learning rate.
sample_rate = 16000
num_train_steps = 100000
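# The schedule applied per minibatch in run_epoch() is:
#   lr(step) = (learning_rate - min_learning_rate) * decay_rate**step + min_learning_rate
# e.g. at step 10,000: (0.01 - 1e-6) * 0.9999**10000 + 1e-6 ≈ 3.7e-3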
parser = argparse.ArgumentParser()
######## REQUIRED ARGS #########
# Load a preset configuration object. Defines model size, etc. Required
parser.add_argument('--config', type=str, required=True)
# Set a directory to store model checkpoints and tensorboard logs. Creates the directory if it doesn't exist
parser.add_argument('--checkpoint_dir', type=str, required=True)
# Set root data directory containing "Signals/<meeting_id>/<channel>.sph" audio files
parser.add_argument('--data_root', type=str, required=True)
######## OPTIONAL ARGS #########
# Directory containing the Dataframes for train, val, test data
# These dataframes contain the segment information for speech/laughter segments
parser.add_argument('--data_dfs_dir', type=str, default='data_dfs')
# Set batch size. Overrides batch_size set in the config object
parser.add_argument('--batch_size', type=str)
# Default to use GPU. can set to 'cpu' to override
parser.add_argument('--torch_device', type=str, default='cuda')
# Number of processes for parallel processing on cpu. Used mostly for loading in large datafiles
# before training begins or when re-sampling data between epochs
parser.add_argument('--num_workers', type=str, default='8')
# 0.5 unless specified here
parser.add_argument('--dropout_rate', type=str, default='0.5')
# number of batches to accumulate before applying gradients
parser.add_argument('--gradient_accumulation_steps', type=str, default='1')
# include_words flag - if set, data loader will include laughter combined with words
# For example, [laughter - I], [laughter - think], ['laughter -so ']
# This option is not used in the paper
parser.add_argument('--include_words', type=str, default=None)
# Audioset noisy-label training flag
# Flag - if set, train on AudioSet with noisy labels, rather than Switchboard with good labels
parser.add_argument('--train_on_noisy_audioset', type=str, default=None)
args = parser.parse_args()
config = configs.CONFIG_MAP[args.config]
checkpoint_dir = args.checkpoint_dir
data_root = args.data_root
data_dfs_dir = args.data_dfs_dir
batch_size = int(args.batch_size or config['batch_size'])
val_data_text_path = config['val_data_text_path']
feature_fn = partial(config['feature_fn'], sr=sample_rate)
augment_fn = config['augment_fn']
log_frequency = config['log_frequency']
swb_train_audio_pkl_path = config['swb_train_audio_pkl_path']
swb_val_audio_pkl_path = config['swb_val_audio_pkl_path']
audioset_noisy_train_audio_pkl_path = config['audioset_noisy_train_audio_pkl_path']
a_root = config['swb_audio_root']
t_root = config['swb_transcription_root']
expand_channel_dim = config['expand_channel_dim']
torch_device = args.torch_device
num_workers = int(args.num_workers)
dropout_rate = float(args.dropout_rate)
supervised_augment = config['supervised_augment']
supervised_spec_augment = config['supervised_spec_augment']
gradient_accumulation_steps = int(args.gradient_accumulation_steps)
if args.include_words is not None:
include_words = True
else:
include_words = False
if args.train_on_noisy_audioset is not None:
train_on_noisy_audioset = True
else:
train_on_noisy_audioset = False
collate_fn = partial(audio_utils.pad_sequences_with_labels,
expand_channel_dim=expand_channel_dim)
##################################################################
#################### Setup Training Model ######################
##################################################################
def load_noise_files():
noise_files = librosa.util.find_files('./data/background_noise_files/')
music_files = librosa.util.find_files('./data/background_music_files/')
np.random.seed(0)
noise_files += list(np.random.choice(music_files, 200))
noise_signals = audio_utils.parallel_load_audio_batch(
noise_files, n_processes=8, sr=sample_rate)
noise_signals = [s for s in noise_signals if len(s) > sample_rate]
return noise_signals
def load_impulse_responses():
ir_files = librosa.util.find_files('./data/impulse_responses/')
impulse_responses = audio_utils.parallel_load_audio_batch(
ir_files, n_processes=8, sr=sample_rate)
return impulse_responses
def run_training_loop(n_epochs, model, device, checkpoint_dir,
optimizer, iterator, log_frequency=25, val_iterator=None, gradient_clip=1.,
verbose=True):
for epoch in range(n_epochs):
start_time = time.time()
train_loss = run_epoch(model, 'train', device, iterator,
checkpoint_dir=checkpoint_dir, optimizer=optimizer,
log_frequency=log_frequency, checkpoint_frequency=log_frequency,
clip=gradient_clip, val_iterator=val_iterator,
verbose=verbose)
if verbose:
end_time = time.time()
epoch_mins, epoch_secs = torch_utils.epoch_time(
start_time, end_time)
print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')
def run_epoch(model, mode, device, iterator, checkpoint_dir, optimizer=None, clip=None,
batches=None, log_frequency=None, checkpoint_frequency=None,
validate_online=True, val_iterator=None, val_batches=None,
verbose=True):
""" args:
mode: 'train' or 'eval'
"""
def _eval_for_logging(model, device, val_itr, val_iterator, val_batches_per_log):
model.eval()
val_losses = []
val_accs = []
for j in range(val_batches_per_log):
try:
                val_batch = next(val_itr)
except StopIteration:
val_itr = iter(val_iterator)
                val_batch = next(val_itr)
val_loss, val_acc = _eval_batch(model, device, val_batch)
val_losses.append(val_loss)
val_accs.append(val_acc)
model.train()
return val_itr, np.mean(val_losses), np.mean(val_accs)
def _eval_batch(model, device, batch, batch_index=None, clip=None):
if batch is None:
print("None Batch")
return 0.
with torch.no_grad():
#seqs, labs = batch
segs = batch['inputs']
labs = batch['is_laugh']
src = torch.from_numpy(np.array(segs)).float().to(device)
src = src[:,None,:,:] # add additional dimension
trg = torch.from_numpy(np.array(labs)).float().to(device)
output = model(src).squeeze()
criterion = nn.BCELoss()
bce_loss = criterion(output, trg)
preds = torch.round(output)
print(f'targets: {trg}')
print(f'preds: {preds}')
# sum(preds==trg).float()/len(preds)
acc = torch.sum(preds == trg).float()/len(trg)
return bce_loss.item(), acc.item()
def _train_batch(model, device, batch, batch_index=None, clip=None):
if batch is None:
print("None Batch")
return 0.
#seqs, labs = batch
segs = batch['inputs']
labs = batch['is_laugh']
src = torch.from_numpy(np.array(segs)).float().to(device)
src = src[:,None,:,:] # add additional dimension
trg = torch.from_numpy(np.array(labs)).float().to(device)
# optimizer.zero_grad()
output = model(src).squeeze()
criterion = nn.BCELoss()
preds = torch.round(output)
acc = torch.sum(preds == trg).float()/len(trg)
bce_loss = criterion(output, trg)
loss = bce_loss
loss = loss/gradient_accumulation_steps
loss.backward()
if model.global_step % gradient_accumulation_steps == 0:
if clip is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
model.zero_grad()
return bce_loss.item(), acc.item()
# TODO: possibly take out this case because we just want to support passing an iterator anyway?
if False: #not (bool(iterator) ^ bool(batches)):
raise Exception("Must pass either `iterator` or batches")
if mode.lower() not in ['train', 'eval']:
raise Exception("`mode` must be 'train' or 'eval'")
if mode.lower() == 'train' and validate_online:
#val_batches_per_epoch = torch_utils.num_batches_per_epoch(val_iterator)
#val_batches_per_log = int(np.round(val_batches_per_epoch))
val_batches_per_log = 10 #TODO hardcoded for now
val_itr = iter(val_iterator)
if mode == 'train':
if optimizer is None:
raise Exception("Must pass Optimizer in train mode")
model.train()
_run_batch = _train_batch
elif mode == 'eval':
model.eval()
_run_batch = _eval_batch
epoch_loss = 0
optimizer = optim.Adam(model.parameters())
if iterator is not None:
# batches_per_epoch = torch_utils.num_batches_per_epoch(iterator)
batch_losses = []
batch_accs = []
# batch_consistency_losses = []
# batch_ent_losses = []
num_batches = 0
for i, batch in tqdm(enumerate(iterator)):
# learning rate scheduling
lr = (learning_rate - min_learning_rate) * \
decay_rate**(float(model.global_step))+min_learning_rate
            # setting optimizer.lr directly has no effect on a torch optimizer; update its param_groups
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
batch_loss, batch_acc = _run_batch(model, device, batch,
batch_index=i, clip=clip)
batch_losses.append(batch_loss)
batch_accs.append(batch_acc)
if log_frequency is not None and (model.global_step + 1) % log_frequency == 0:
val_itr, val_loss_at_step, val_acc_at_step = _eval_for_logging(model, device,
val_itr, val_iterator, val_batches_per_log)
is_best = (val_loss_at_step < model.best_val_loss)
if is_best:
model.best_val_loss = val_loss_at_step
train_loss_at_step = np.mean(batch_losses)
train_acc_at_step = np.mean(batch_accs)
if verbose:
print("\nLogging at step: ", model.global_step)
print("Train loss: ", train_loss_at_step)
print("Train accuracy: ", train_acc_at_step)
print("Val loss: ", val_loss_at_step)
print("Val accuracy: ", val_acc_at_step)
writer.add_scalar(
'loss/train', train_loss_at_step, model.global_step)
writer.add_scalar(
'acc/train', train_acc_at_step, model.global_step)
writer.add_scalar(
'loss/eval', val_loss_at_step, model.global_step)
writer.add_scalar(
'acc/eval', val_acc_at_step, model.global_step)
batch_losses = []
batch_accs = [] # reset
if checkpoint_frequency is not None and (model.global_step + 1) % checkpoint_frequency == 0:
state = torch_utils.make_state_dict(model, optimizer, model.epoch,
model.global_step, model.best_val_loss)
torch_utils.save_checkpoint(
state, is_best=is_best, checkpoint=checkpoint_dir)
epoch_loss += batch_loss
model.global_step += 1
            num_batches += 1
model.epoch += 1
return epoch_loss / num_batches
print("Initializing model...")
device = torch.device(torch_device if torch.cuda.is_available() else 'cpu')
print("Using device", device)
model = config['model'](dropout_rate=dropout_rate,
linear_layer_size=config['linear_layer_size'], filter_sizes=config['filter_sizes'])
model.set_device(device)
torch_utils.count_parameters(model)
model.apply(torch_utils.init_weights)
optimizer = optim.Adam(model.parameters())
if os.path.exists(checkpoint_dir) and os.path.isfile(os.path.join(checkpoint_dir, 'last.pth.tar')):
torch_utils.load_checkpoint(
checkpoint_dir+'/last.pth.tar', model, optimizer)
else:
print("Saving checkpoints to ", checkpoint_dir)
print("Beginning training...")
writer = SummaryWriter(checkpoint_dir)
if augment_fn is not None:
print("Loading background noise files...")
noise_signals = load_noise_files()
augment_fn = partial(augment_fn, noise_signals=noise_signals)
print("Loading impulse responses...")
impulse_responses = load_impulse_responses()
augment_fn = partial(augment_fn, impulse_responses=impulse_responses)
if supervised_augment:
augmented_feature_fn = partial(feature_fn, augment_fn=augment_fn)
else:
augmented_feature_fn = feature_fn
if supervised_spec_augment:
augmented_feature_fn = partial(
feature_fn, spec_augment_fn=audio_utils.spec_augment)
#########################################################
############ Do this once, keep in memory ############
#########################################################
# print("Loading switchboard audio files...")
# with open(swb_train_audio_pkl_path, "rb") as f: # Loads all switchboard audio files
# switchboard_train_audio_hash = pickle.load(f)
# with open(swb_val_audio_pkl_path, "rb") as f:
# switchboard_val_audios_hash = pickle.load(f)
# all_audio_files = librosa.util.find_files(a_root, ext='sph')
# train_folders, val_folders, test_folders = dataset_utils.get_train_val_test_folders(
# t_root)
# t_files_a, a_files = dataset_utils.get_audio_files_from_transcription_files(
# dataset_utils.get_all_transcriptions_files(train_folders, 'A'), all_audio_files)
# t_files_b, _ = dataset_utils.get_audio_files_from_transcription_files(
# dataset_utils.get_all_transcriptions_files(train_folders, 'B'), all_audio_files)
# # These two lists aren't used - possibly remove?
# all_swb_train_sigs = [switchboard_train_audio_hash[k]
# for k in switchboard_train_audio_hash if k in a_files]
# all_swb_val_sigs = [switchboard_val_audios_hash[k]
# for k in switchboard_val_audios_hash]
def get_audios_from_text_data(data_file_or_lines, h, sr=sample_rate):
# This function doesn't use the subsampled offset and duration
# So it will need to be handled later, in the data loader
#column_names = ['offset','duration','audio_path','label']
column_names = ['offset', 'duration', 'subsampled_offset',
'subsampled_duration', 'audio_path', 'label']
audios = []
if type(data_file_or_lines) == type([]):
df = pd.DataFrame(data=data_file_or_lines, columns=column_names)
else:
df = pd.read_csv(data_file_or_lines, sep='\t',
header=None, names=column_names)
audio_paths = list(df.audio_path)
offsets = list(df.offset)
durations = list(df.duration)
for i in tqdm(range(len(audio_paths))):
aud = h[audio_paths[i]][int(offsets[i]*sr) :int((offsets[i]+durations[i])*sr)]
audios.append(aud)
return audios
def make_dataframe_from_text_data(data_file_or_lines, h, sr=sample_rate):
# h is a hash, which maps from audio file paths to preloaded full audio files
# column_names = ['offset','duration','audio_path','label']
column_names = ['offset', 'duration', 'subsampled_offset',
'subsampled_duration', 'audio_path', 'label']
# If it's a list of lines create a DataFrame from it
if type(data_file_or_lines) == type([]):
#lines = [l.split('\t') for l in data_file_or_lines]
        df = pd.DataFrame(data=data_file_or_lines, columns=column_names)
""" Class to interface with Operator registry backend and facilitate in alias matching."""
from __future__ import annotations
from typing import List, Set, Dict, Tuple, Optional, Callable, Any, Union
import inspect
import json
import logging
import os
from abc import ABC, abstractmethod
from datetime import datetime
import pandas as pd
from fuzzywuzzy import fuzz, process
import models
import version
from stringprocessor import StringProcessor as sp
from deco import chained, classproperty, indicate
from util.types import Scalar
from yammler import Yammler
logger = logging.getLogger(__name__)
__release__ = version.__release__
FUZZY_TARGET_LENGTH_MIN = 3
DSN = f"{__release__}".lower()
METHOD_TEMPLATE = __release__ + "/{function}"
def get_release():
v: str = "unknown"
if "version" in globals():
try:
v = version.__release__
except AttributeError as ae:
logger.debug(f"version.py has no attribute '__release__' -- {ae}")
return v
class RegistryBase(ABC):
def __init__(self, *args, **kwargs):
super().__init__()
@abstractmethod
def load(cls) -> None:
pass
@abstractmethod
def save(self) -> None:
"""Persist result to the backend"""
pass
@abstractmethod
def refresh(self) -> None:
"""Reload data from the backend"""
pass
@abstractmethod
def lookup(self, key) -> str:
pass
@abstractmethod
def add(self, key, value) -> None:
pass
@abstractmethod
def remove(self, key) -> None:
pass
@abstractmethod
def closest(self, key) -> str:
pass
@abstractmethod
def handle_date(self, value):
pass
class RegistryBackend(RegistryBase):
_value_key = "value"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cls = kwargs.pop("cls", dict) # optional class for return type
self._value_key = kwargs.pop("value_key", self._value_key)
@property
def value_key(self):
return self._value_key
# TODO: @indicate
def load(cls) -> None:
pass
def save(self) -> None:
"""Persist result to the backend"""
pass
def refresh(self) -> None:
"""Reload data from the backend"""
pass
def lookup(self, key) -> str:
pass
def add(self, key, value) -> None:
pass
def remove(self, key) -> None:
pass
def closest(self, key) -> str:
pass
def handle_date(self, value):
pass
@staticmethod
def stamp():
return datetime.utcnow()
class DataFrameBackend(RegistryBackend, pd.DataFrame):
# temporary properties
_internal_names = pd.DataFrame._internal_names
_internal_names_set = set(_internal_names)
# normal properties (persistent)
_metadata = ["load", "save", "refresh"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def _constructor(self):
return self.__class__
@classmethod
def load(cls):
pass
def save(self) -> None:
"""Persist result to the backend"""
pass
def refresh(self) -> None:
"""Persist result to the backend"""
pass
class FileBackend(RegistryBackend):
def __init__(self, fspath: str, interface=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._fspath: str = fspath
self._interface = interface # Yammler
@property
def fspath(self):
return self._fspath
def load(
self, index_col: str = "operator", date_cols: list = ["updated", "created"]
) -> pd.DataFrame:
""" Populate the index from a json file."""
df = pd.read_json(self.fspath, convert_dates=date_cols)
try:
df = df.set_index(index_col)
except KeyError:
raise KeyError(
f"Backend has no column named '{index_col}'. Try passing 'index_col = column_name' to the backend constructor. Available columns are: {df.columns.tolist()}"
)
self.source = df
return self
def save(self) -> None:
"""Persist the index"""
try:
js = json.loads(
self.reset_index().to_json(orient="records", date_format="iso")
)
with open(self._fspath, "w") as f:
f.writelines(json.dumps(js, indent=4))
logger.debug(f"Persisted registry to {self._fspath}")
except Exception as e:
logger.error(f"Failed to save {self.__class__} -- {e}")
def refresh(self) -> None:
pass
def lookup(self) -> None:
pass
def add(self, key) -> str:
pass
def remove(self, key) -> str:
pass
def closest(self, key) -> str:
pass
def _link(self):
""" Retrieve link to persistance mechanism """
return self._interface(self.fspath)
class YamlBackend(RegistryBackend):
"""Interface for a YAML backed registry. Data is manipulated in an in-memory
Pandas' DataFrame. Changes are persisted on command. The interface for this backend differs from others in that the step to persist changes is explicit. A customized interface to the YAML file on disk can be substituted using the 'interface' keyword. """
_df = None
_yaml = None
def __init__(
self,
fspath: str,
date_cols: list = None,
interface: Any = Yammler,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.date_cols = date_cols or []
self._fspath = fspath
@property
def fspath(self):
return self._fspath
@property
def df(self):
return self._df
@property
def yaml(self):
if self._yaml is None:
self._yaml = self._link()
return self._yaml
# TODO: Generalize and move to parent
@indicate
def load(self) -> YamlBackend:
""" Populate the index """
self._df = pd.DataFrame(self.yaml).T
return self
@property
def defaults(self):
return self._defaults
@defaults.setter
def defaults(self, key, value) -> None:
self._defaults[key] = value
def save(self) -> None:
"""Persist the index"""
try:
self.yaml.overwrite(self.df.to_dict(orient="index")).dump(force=True)
logger.debug(f"Persisted registry to {self.fspath}")
except Exception as e:
logger.error(f"Failed to save {self.__class__} -- {e}")
def refresh(self) -> None:
# TODO: save records updated since load time and update self.data with new records available in the backend
return self.load()
def lookup(self, key) -> self._cls:
try:
return self._cls(**self.df.loc[key].to_dict())
except KeyError:
logger.debug(f"No entry found for '{key}'")
return self._cls()
def add(self, key, value: Union[Scalar, dict]) -> str:
existing = dict(self.lookup(key))
new = dict.fromkeys(self.df.columns.tolist())
new.update({"created_at": self.stamp(), "updated_at": self.stamp()})
new.update(existing)
if isinstance(value, dict):
new[self.value_key].update(value)
else:
new[self.value_key] = value
self._df.loc[key] = new
def remove(self, key) -> str:
return 0
def closest(self, key) -> str:
return 0
def _encode_dates(self) -> pd.DataFrame:
df = self._df.copy(deep=True)
if df is not None:
for col in self.date_cols:
try:
df[col] = df[col].apply(self.handle_date)
df[col] = df[col].astype(str)
except Exception as e:
logger.debug(f"Error encoding dates in column '{col}' -- {e}")
return df
def handle_date(self, dt, default: Callable = None) -> datetime:
try:
dt = pd.to_datetime(
dt, infer_datetime_format=True, errors="raise", utc=True
) # assume unknown timezones are UTC
if not dt.tzname():
dt = dt.localize("UTC")
elif dt.tzname() != "UTC":
dt = dt.convert("UTC")
return dt
except:
logger.debug(f"Failed converting value to datetime -> {dt}")
if default:
return default()
else:
return pd.Timestamp.now()
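# Hedged usage sketch for YamlBackend (file name and YAML layout are assumptions):
#
#     backend = YamlBackend("operators.yaml", date_cols=["created_at", "updated_at"])
#     backend.load()    # reads the YAML file into an in-memory DataFrame
#     entry = backend.lookup("APACHE CORP")
#     backend.save()    # persists the DataFrame back to the YAML file via Yammler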
class SQLBackend(RegistryBackend):
import models
models.connect_db()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@classmethod
def load(cls, table_name: str, index_col: str = "operator"):
""" Populate the index from a sql table"""
# df = Operator_Table.df
# df.operator = df.operator.apply(sp.normalize)
# df.operator_alias = df.operator_alias.apply(sp.normalize)
# df = df.rename(columns={"operator_alias": "alias"})
try:
import models
cnxn = models.connect_db()
cnxn["Base"].prepare(Base.metadata.bind)
op = Operator
op.cnames()
# TODO: Connect this up
except KeyError:
raise KeyError(
f"Backend has no column named '{index_col}'. Try passing 'index_col = column_name' to the backend constructor. Available columns are: {df.columns.tolist()}"
)
return df
def save(self) -> None:
"""Persist the index"""
try:
js = json.loads(
self.reset_index().to_json(orient="records", date_format="iso")
)
with open(self._fspath, "w") as f:
f.writelines(json.dumps(js, indent=4))
logger.debug(f"Persisted registry to {self._fspath}")
except Exception as e:
logger.error(f"Failed to save {self.__class__} -- {e}")
class Registry:
def __init__(self, backend: RegistryBackend):
self._backend = backend
@property
def backend(self):
return self._backend
def load(self, *args, **kwargs) -> self:
self.backend.load(*args, **kwargs)
return self
def save(self, *args, **kwargs) -> self:
self.backend.save(*args, **kwargs)
return self
@classmethod
def from_cache(cls):
pass
@classmethod
def to_cache(cls):
pass
def _capture_method(self):
caller = inspect.stack()[1][3]
return METHOD_TEMPLATE.format(function=caller)
@classmethod
def default_scorer(cls):
return fuzz.token_set_ratio
def lookup(self, target: str) -> pd.Series:
"""Look for an exact match for the target string in a list of all operator names. If found,
returns the alias for that name. Otherwise, returns None.
"""
try:
# self.refresh()
result = pd.Series(name=target)
if target is not None:
result = self.loc[target].copy()
if result.ndim > 1:
result = result.max()
result["method"] = self._capture_method()
return result
except KeyError as e:
logger.debug(f"Registry: Name {target} not found.")
except Exception as e:
logger.error(
f"Error looking up operator name ({e}) -- \n Operator Name: {target}"
)
return result
def exists(self, alias: str) -> bool:
raise NotImplementedError()
def _fuzzy_match(
self, target: str, scorer=None, score_cutoff=85, limit=1
) -> pd.Series:
"""Attempt to fuzzy match the target string to an operator name using the given scorer function.
The alias for the match with the highest score is returned. If a match with a score above the cutoff
is not found, None is returned
Arguments:
target {str} -- [description]
Keyword Arguments:
extract {str} -- 'one' or 'many'
scorer {func} -- fuzzywuzzy scorer function
-- alternative scorers:
fuzz.token_sort_ratio -> match with tokens in an ordered set
fuzz.token_set_ratio -> match with tokens as a set
fuzz.partial_ratio -> ratio of string partials
"""
scorer = scorer or self.default_scorer()
# result = pd.Series(name = target)
extracted: list = process.extractBests(
target, self.operator, scorer=scorer, limit=limit, score_cutoff=score_cutoff
)
df = pd.DataFrame.from_records(
extracted, columns=["name", "index_score", "fuzzy_score"]
)
return df
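    # Illustrative scorer behaviour (hedged example): fuzz.token_set_ratio ignores word order
    # and duplicate tokens, so e.g. fuzz.token_set_ratio("CHESAPEAKE ENERGY CORP",
    # "ENERGY CHESAPEAKE") scores 100 because one name's token set is contained in the other's.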
def _inspect_fuzzy_result(self, extracted: pd.DataFrame):
# if one result is passed
if len(extracted) == 1:
result = extracted
best, score = result
result = self.loc[best].copy()
# if result.ndim > 1:
# result = result.max()
else: # if > 1 result is passed
result.loc["method"] = self._capture_method()
return result
def refresh(self):
"""Get updated records from the database
"""
new = self.table.records_updated_since(self.updated.max()).set_index("operator")
new = new.rename(columns={"operator_alias": "alias", "fscore": "confidence"})
if not new.empty: # TODO: this is clunky. need to fix later
self.update(new)
for idx, values in new.iterrows():
try:
self.loc[
idx
] # try to lookup the index. Insert record if the lookup fails.
except KeyError:
self.loc[idx] = values
def diverge(cls, alias1: str):
"""Assess the distance between the names of the underlying operators produce a mean
distance from one another. If their mean distance surpasses a certain threshold, divide
the operator names at the mean and rename the alias of those aliases in the group with the
larger mean. (Alternatively, classify with sklean to find a natural break point.) Once spilt
and renamed, cross validate the two groups. If an alias produces a higher score with the alias
# from the other group, reclassify it with that other groups alias.
"""
pass
def add(op_name: str, op_alias: str):
pass
def remove(op_name: str, op_alias: str):
pass
class OperatorRegistry(Registry):
pass
class FileIndex(Registry):
# temporary properties
_internal_names = pd.DataFrame._internal_names
_internal_names_set = set(_internal_names)
table = None
# normal properties (persistent)
_metadata = ["_fp"]
def __init__(self, *args, **kwargs):
self._fp = kwargs.pop("path", None)
super().__init__(*args, **kwargs)
@property
def _constructor(self):
return FileIndex
@classmethod
def load(cls, path: str):
""" Populate the index from a json file."""
df = pd.read_json(path, convert_dates=["updated", "created"])
df = FileIndex(data=df, path=path)
if "operator" in df.columns:
df = df.set_index("operator")
return df
def save(self) -> None:
"""Save Index to file"""
try:
js = json.loads(
self.reset_index().to_json(orient="records", date_format="iso")
)
with open(self._fp, "w") as f:
f.writelines(json.dumps(js, indent=4))
logger.debug(f"Saved index to {self._fp}")
except Exception as e:
logger.error(f"Could not update database -- {e}")
def _capture_method(self):
return METHOD_TEMPLATE.format(function=inspect.stack()[1][3])
@classmethod
def default_scorer(cls):
return fuzz.token_set_ratio
def lookup(self, target: str) -> pd.Series:
"""Look for an exact match for the target string in a list of all operator names. If found,
returns the alias for that name. Otherwise, returns None.
"""
try:
# self.refresh()
result = pd.Series(name=target)
if target is not None:
result = self.loc[target].copy()
if result.ndim > 1:
result = result.max()
result["method"] = self._capture_method()
return result
except KeyError as e:
logger.debug(f"Registry: Name {target} not found.")
except Exception as e:
logger.error(
f"Error looking up operator name ({e}) -- \n Operator Name: {target}"
)
return result
def exists(self, alias: str) -> bool:
return any(self.alias == alias)
def _fuzzy_match(
self, target: str, scorer=None, score_cutoff=85, limit=1
) -> pd.Series:
"""Attempt to fuzzy match the target string to an operator name using the given scorer function.
The alias for the match with the highest score is returned. If a match with a score above the cutoff
is not found, None is returned
Arguments:
target {str} -- [description]
Keyword Arguments:
extract {str} -- 'one' or 'many'
scorer {func} -- fuzzywuzzy scorer function
-- alternative scorers:
fuzz.token_sort_ratio -> match with tokens in an ordered set
fuzz.token_set_ratio -> match with tokens as a set
fuzz.partial_ratio -> ratio of string partials
"""
scorer = scorer or self.default_scorer()
# result = pd.Series(name = target)
extracted: list = process.extractBests(
target, self.operator, scorer=scorer, limit=limit, score_cutoff=score_cutoff
)
df = pd.DataFrame.from_records(
extracted, columns=["name", "index_score", "fuzzy_score"]
)
return df
def _is_long_enough(self, name: str) -> bool:
return len(name) >= FUZZY_TARGET_LENGTH_MIN
def _inspect_fuzzy_result(self, extracted: pd.DataFrame):
# if one result is passed
if len(extracted) == 1:
result = extracted
best, score = result
result = self.loc[best].copy()
# if result.ndim > 1:
# result = result.max()
else: # if > 1 result is passed
result.loc["method"] = self._capture_method()
return result
def refresh(self):
"""Get updated records from the database
"""
new = self.table.records_updated_since(self.updated.max()).set_index("operator")
new = new.rename(columns={"operator_alias": "alias", "fscore": "confidence"})
if not new.empty: # TODO: this is clunky. need to fix later
self.update(new)
for idx, values in new.iterrows():
try:
self.loc[
idx
] # try to lookup the index. Insert record if the lookup fails.
except KeyError:
self.loc[idx] = values
def diverge(cls, alias1: str):
"""Assess the distance between the names of the underlying operators produce a mean
distance from one another. If their mean distance surpasses a certain threshold, divide
the operator names at the mean and rename the alias of those aliases in the group with the
larger mean. (Alternatively, classify with sklean to find a natural break point.) Once spilt
and renamed, cross validate the two groups. If an alias produces a higher score with the alias
# from the other group, reclassify it with that other groups alias.
"""
pass
class SQLIndex(Registry):
# temporary properties
_internal_names = pd.DataFrame._internal_names
_internal_names_set = set(_internal_names)
table = Operator_Table
# normal properties (persistent)
_metadata = [
"_capture_method",
"default_scorer",
"normalize",
"_exact_match",
"_fuzzy_match",
"_is_long_enough",
"_inspect_fuzzy_result",
"waterfall",
"table",
"update_database",
"refresh",
]
@property
def _constructor(self):
return Registry
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@classmethod
def load(cls):
""" Populate the index from a sql table. """
df = Operator_Table.df
df.operator = df.operator.apply(sp.normalize)
df.operator_alias = df.operator_alias.apply(sp.normalize)
df = df.rename(columns={"operator_alias": "alias"})
return SQLIndex(data=df).set_index("operator")
@classmethod
def save(cls, result: pd.Series) -> None:
"""Merge result into the database"""
result = result.rename(
{"name": "operator", "alias": "operator_alias", "fscore": "confidence"}
)
result = result.to_frame().T[
["operator", "operator_alias", "confidence", "method"]
]
try:
cls.table.merge_records(result)
cls.table.persist()
logger.debug(f"Updated {result.operator_alias} in database")
except Exception as e:
logger.error(f"Could not update database -- {e}")
@classmethod
def from_cache(cls):
df = pd.read_json(OPERATORPATH, convert_dates=["updated", "created"])
df = Registry(data=df).set_index("operator")
return df
@classmethod
def to_cache(cls):
cls.to_json(OPERATORPATH, orient="records", date_format="iso")
def _capture_method(self):
return METHOD_TEMPLATE.format(function=inspect.stack()[1][3])
@classmethod
def default_scorer(cls):
return fuzz.token_set_ratio
def lookup(self, target: str) -> pd.Series:
"""Look for an exact match for the target string in a list of all operator names. If found,
returns the alias for that name. Otherwise, returns None.
"""
try:
# self.refresh()
            result = pd.Series(name=target)
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
assert result == expected
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with pytest.raises(TypeError):
dti + dti
with pytest.raises(TypeError):
dti_tz + dti_tz
with pytest.raises(TypeError):
dti_tz + dti
with pytest.raises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
assert idx[0] in idx
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert not idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.asobject)
assert idx.asobject.equals(idx)
assert idx.asobject.equals(idx.asobject)
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.asobject)
assert not idx.asobject.equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.asobject)
assert not idx.asobject.equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
class TestDateTimeIndexToJulianDate(object):
def test_1700(self):
r1 = Float64Index([2345897.5, 2345898.5, 2345899.5, 2345900.5,
2345901.5])
r2 = date_range(start=Timestamp('1710-10-01'), periods=5,
freq='D').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
r1 = Float64Index([2451601.5, 2451602.5, 2451603.5, 2451604.5,
2451605.5])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='D').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
r1 = Float64Index(
[2451601.5, 2451601.5416666666666666, 2451601.5833333333333333,
2451601.625, 2451601.6666666666666666])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='H').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
r1 = Float64Index(
[2451601.5, 2451601.5006944444444444, 2451601.5013888888888888,
2451601.5020833333333333, 2451601.5027777777777777])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='T').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
r1 = Float64Index(
[2451601.5, 2451601.500011574074074, 2451601.5000231481481481,
2451601.5000347222222222, 2451601.5000462962962962])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='S').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + pd.DateOffset(years=1)
result2 = pd.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - pd.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
Timestamp('2000-02-16', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
# array of offsets - valid for Series only
if klass is Series:
with tm.assert_produces_warning(PerformanceWarning):
s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.MonthEnd()])
exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')
])
assert_func(result, exp)
# same offset
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.DateOffset(years=1)])
exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
assert_func(result, exp)
s = klass([
|
Timestamp('2000-01-05 00:15:00')
|
pandas.Timestamp
|
import argparse
from glob import glob

import joblib  # needed below to save/load the blender model
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression  # used when training the blender
def parse_arguments(parser):
parser.add_argument('--data_dir', type=str, default=None)
parser.add_argument('--output_dir', type=str, default=None)
parser.add_argument('--mode', type=str, default='test')
parser.add_argument('--test_file', type=str, default='test.tsv')
parser.add_argument('--text_only', type=bool, default=True)
parser.add_argument('--train_blender', type=bool, default=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = parse_arguments(parser)
assert (args.data_dir)
# Import the real test data
test_df = pd.read_csv(args.data_dir + '/test.csv')
# Importing the event code dictionary to convert the BERT indices
code_df = pd.read_csv(args.data_dir + '/code_dict.csv')
code_dict = dict(zip(code_df.value, code_df.event_code))
# Importing the scores from the 4 BERT runs
if args.mode == 'validate':
run_folder = 'val_runs'
elif args.mode == 'test':
run_folder = 'test_runs'
prob_list = []
for fn in sorted(glob(args.output_dir + '/[0-9]')):
print(fn)
run_probs = np.array(
pd.read_csv(fn + '/test_results.tsv', sep='\t', header=None))
test_df['event'] = [
code_dict[code] for code in np.argmax(run_probs, axis=1)
]
test_df.to_csv(fn + '/solution.csv', header=True, index=False)
prob_list.append(run_probs)
assert (prob_list)
prob_list = np.array(prob_list)
# Grouping the probabilities for regular averaging
avg_probs = np.mean(prob_list, axis=0)
print(avg_probs)
assert (np.allclose(np.sum(avg_probs, axis=1), np.ones(test_df.shape[0])))
avg_guesses = np.array(
[code_dict[code] for code in np.argmax(avg_probs, axis=1)])
# Grouping the probabilities for blending
wide_probs = np.concatenate(prob_list, axis=1)
# Producing guesses when only the input text is available
if args.text_only:
# Loading the blender model
# lgr = joblib.load(args.data_dir + 'blender.joblib')
# blend_guesses = lgr.predict(wide_probs)
# blend_probs = np.max(lgr.predict_proba(wide_probs), axis=1)
# print(blend_probs[0])
# Exporting the guesses to disk
ids = pd.read_csv(args.data_dir + '/' + args.test_file, sep='\t')['id']
guess_df = pd.DataFrame(
pd.concat([
ids,
pd.Series(avg_guesses),
pd.Series(np.max(avg_probs, axis=1))
],
axis=1))
guess_df.columns = ['id', 'avg_guess', 'avg_prob']
guess_df.to_csv(args.output_dir + '/guesses.csv',
header=True,
index=False)
test_df['event'] = avg_guesses
test_df.to_csv(args.output_dir + '/solution.csv',
header=True,
index=False)
# Producing guesses and scores when the labels are also available
else:
# Getting the guesses from the blending model
if args.train_blender:
targets = pd.read_csv(args.data_dir + '/' +
args.test_file)['event']
lgr = LogisticRegression()
lgr.fit(wide_probs, targets)
joblib.dump(lgr, args.data_dir + 'blender.joblib')
else:
lgr = joblib.load(args.data_dir + 'blender.joblib')
blend_guesses = lgr.predict(wide_probs)
# Importing the test records and getting the various scores
test_records =
|
pd.read_csv(args.data_dir + args.test_file)
|
pandas.read_csv
|
"""
Test various functions regarding chapter 18: Microstructural Features.
"""
import os
import unittest
import numpy as np
import pandas as pd
from mlfinlab.data_structures import get_volume_bars
from mlfinlab.microstructural_features import (get_vpin, get_bar_based_amihud_lambda, get_bar_based_kyle_lambda,
get_bekker_parkinson_vol, get_corwin_schultz_estimator,
get_bar_based_hasbrouck_lambda, get_roll_impact, get_roll_measure,
quantile_mapping, sigma_mapping, MicrostructuralFeaturesGenerator)
from mlfinlab.microstructural_features.encoding import encode_tick_rule_array
from mlfinlab.microstructural_features.entropy import get_plug_in_entropy, get_shannon_entropy, get_lempel_ziv_entropy, \
get_konto_entropy, _match_length
from mlfinlab.util import get_bvc_buy_volume
class TestMicrostructuralFeatures(unittest.TestCase):
"""
Test get_inter_bar_features, test_first_generation, test_second_generation, test_misc
"""
def setUp(self):
"""
Set the file path for the sample dollar bars data.
"""
project_path = os.path.dirname(__file__)
self.path = project_path + '/test_data/dollar_bar_sample.csv'
self.trades_path = project_path + '/test_data/tick_data.csv'
self.data = pd.read_csv(self.path, index_col='date_time', parse_dates=[0])
self.data.index = pd.to_datetime(self.data.index)
def test_first_generation(self):
"""
Test first generation intra-bar features
"""
roll_measure = get_roll_measure(self.data.close, window=20)
roll_impact = get_roll_impact(self.data.close, self.data.cum_dollar, window=20)
corwin_schultz = get_corwin_schultz_estimator(self.data.high, self.data.low, window=20)
bekker_parkinson = get_bekker_parkinson_vol(self.data.high, self.data.low, window=20)
# Shape assertions
self.assertEqual(self.data.shape[0], roll_measure.shape[0])
self.assertEqual(self.data.shape[0], roll_impact.shape[0])
self.assertEqual(self.data.shape[0], corwin_schultz.shape[0])
self.assertEqual(self.data.shape[0], bekker_parkinson.shape[0])
# Roll measure/impact assertions
self.assertAlmostEqual(roll_measure.max(), 7.1584, delta=1e-4)
self.assertAlmostEqual(roll_measure.mean(), 2.341, delta=1e-3)
self.assertAlmostEqual(roll_measure[25], 1.176, delta=1e-3) # Test some random value
self.assertAlmostEqual(roll_impact.max(), 1.022e-7, delta=1e-7)
self.assertAlmostEqual(roll_impact.mean(), 3.3445e-8, delta=1e-7)
self.assertAlmostEqual(roll_impact[25], 1.6807e-8, delta=1e-4)
# Test Corwin-Schultz
self.assertAlmostEqual(corwin_schultz.max(), 0.01652, delta=1e-4)
self.assertAlmostEqual(corwin_schultz.mean(), 0.00151602, delta=1e-4)
self.assertAlmostEqual(corwin_schultz[25], 0.00139617, delta=1e-4)
self.assertAlmostEqual(bekker_parkinson.max(), 0.018773, delta=1e-4)
self.assertAlmostEqual(bekker_parkinson.mean(), 0.001456, delta=1e-4)
self.assertAlmostEqual(bekker_parkinson[25], 0.000517, delta=1e-4)
def test_second_generation_intra_bar(self):
"""
Test intra-bar second generation features
"""
kyle_lambda = get_bar_based_kyle_lambda(self.data.close, self.data.cum_vol, window=20)
amihud_lambda = get_bar_based_amihud_lambda(self.data.close, self.data.cum_dollar, window=20)
hasbrouck_lambda = get_bar_based_hasbrouck_lambda(self.data.close, self.data.cum_dollar, window=20)
# Shape assertions
self.assertEqual(self.data.shape[0], kyle_lambda.shape[0])
self.assertEqual(self.data.shape[0], amihud_lambda.shape[0])
self.assertEqual(self.data.shape[0], hasbrouck_lambda.shape[0])
# Test Kyle Lambda
self.assertAlmostEqual(kyle_lambda.max(), 0.000163423, delta=1e-6)
self.assertAlmostEqual(kyle_lambda.mean(), 7.02e-5, delta=1e-6)
self.assertAlmostEqual(kyle_lambda[25], 7.76e-5, delta=1e-6) # Test some random value
# Test Amihud Lambda
self.assertAlmostEqual(amihud_lambda.max(), 4.057838e-11, delta=1e-13)
self.assertAlmostEqual(amihud_lambda.mean(), 1.7213e-11, delta=1e-13)
self.assertAlmostEqual(amihud_lambda[25], 1.8439e-11, delta=1e-13)
# Test Hasbrouck lambda
self.assertAlmostEqual(hasbrouck_lambda.max(), 3.39527e-7, delta=1e-10)
self.assertAlmostEqual(hasbrouck_lambda.mean(), 1.44037e-7, delta=1e-10)
self.assertAlmostEqual(hasbrouck_lambda[25], 1.5433e-7, delta=1e-10)
def test_third_generation(self):
"""
Test third generation features
"""
bvc_buy_volume = get_bvc_buy_volume(self.data.close, self.data.cum_vol, window=20)
vpin_1 = get_vpin(self.data.cum_vol, bvc_buy_volume)
vpin_20 = get_vpin(self.data.cum_vol, bvc_buy_volume, window=20)
self.assertEqual(self.data.shape[0], vpin_1.shape[0])
self.assertEqual(self.data.shape[0], vpin_20.shape[0])
self.assertAlmostEqual(vpin_1.max(), 0.999, delta=1e-3)
self.assertAlmostEqual(vpin_1.mean(), 0.501, delta=1e-3)
self.assertAlmostEqual(vpin_1[25], 0.554, delta=1e-3)
self.assertAlmostEqual(vpin_20.max(), 0.6811, delta=1e-3)
self.assertAlmostEqual(vpin_20.mean(), 0.500, delta=1e-3)
self.assertAlmostEqual(vpin_20[45], 0.4638, delta=1e-3)
def test_tick_rule_encoding(self):
"""
Test tick rule encoding function
"""
with self.assertRaises(ValueError):
encode_tick_rule_array([-1, 1, 0, 20000000])
encoded_tick_rule = encode_tick_rule_array([-1, 1, 0, 0])
self.assertEqual('bacc', encoded_tick_rule)
def test_entropy_calculations(self):
"""
Test entropy functions
"""
message = '11100001'
message_array = [1, 1, 1, 0, 0, 0, 0, 1]
shannon = get_shannon_entropy(message)
plug_in = get_plug_in_entropy(message, word_length=1)
plug_in_arr = get_plug_in_entropy(message_array, word_length=1)
lempel = get_lempel_ziv_entropy(message)
konto = get_konto_entropy(message)
self.assertEqual(plug_in, plug_in_arr)
self.assertAlmostEqual(shannon, 1.0, delta=1e-3)
self.assertAlmostEqual(lempel, 0.625, delta=1e-3)
self.assertAlmostEqual(plug_in, 0.985, delta=1e-3)
self.assertAlmostEqual(konto, 0.9682, delta=1e-3)
# Konto entropy boundary conditions
konto_2 = get_konto_entropy(message, 2)
_match_length('1101111', 2, 3)
self.assertAlmostEqual(konto_2, 0.8453, delta=1e-4)
self.assertEqual(get_konto_entropy('a'), 0) # one-character message entropy = 0
def test_encoding_schemes(self):
"""
Test quantile and sigma encoding
"""
values = np.arange(0, 1000, 1)
quantile_dict = quantile_mapping(values, num_letters=10)
sigma_dict = sigma_mapping(values, step=20)
self.assertEqual(len(quantile_dict), 10)
self.assertEqual(quantile_dict[229.77], '\x02')
self.assertEqual(len(sigma_dict), np.ceil((max(values) - min(values)) / 20))
self.assertEqual(sigma_dict[100], '\x05')
with self.assertRaises(ValueError):
sigma_mapping(values, step=1) # Length of dice > ASCII table
def test_csv_format(self):
"""
Asserts that the csv data being passed is of the correct format.
"""
wrong_date = ['2019-41-30', 200.00, np.int64(5)]
wrong_price = ['2019-01-30', 'asd', np.int64(5)]
wrong_volume = ['2019-01-30', 200.00, '1.5']
too_many_cols = ['2019-01-30', 200.00, np.int64(5), 'Limit order', 'B23']
# pylint: disable=protected-access
        self.assertRaises(ValueError,
                          MicrostructuralFeaturesGenerator._assert_csv,
                          pd.DataFrame(wrong_date).T)
# pylint: disable=protected-access
self.assertRaises(AssertionError,
MicrostructuralFeaturesGenerator._assert_csv,
|
pd.DataFrame(too_many_cols)
|
pandas.DataFrame
|
"""Test numpy engine."""
import hypothesis.strategies as st
import pandas as pd
import pytest
from hypothesis import given
from pandera.engines import pandas_engine
from pandera.errors import ParserError
@pytest.mark.parametrize(
"data_type", list(pandas_engine.Engine.get_registered_dtypes())
)
def test_pandas_data_type(data_type):
"""Test numpy engine DataType base class."""
if data_type.type is None:
# don't test data types that require parameters e.g. Category
return
pandas_engine.Engine.dtype(data_type)
pandas_engine.Engine.dtype(data_type.type)
pandas_engine.Engine.dtype(str(data_type.type))
with pytest.warns(UserWarning):
pd_dtype = pandas_engine.DataType(data_type.type)
with pytest.warns(UserWarning):
pd_dtype_from_str = pandas_engine.DataType(str(data_type.type))
assert pd_dtype == pd_dtype_from_str
assert not pd_dtype.check("foo")
@pytest.mark.parametrize(
"data_type", list(pandas_engine.Engine.get_registered_dtypes())
)
def test_pandas_data_type_coerce(data_type):
"""
    Test that pandas data type coercion will raise a ParserError on failure.
"""
if data_type.type is None:
# don't test data types that require parameters e.g. Category
return
try:
data_type().try_coerce(pd.Series(["1", "2", "a"]))
except ParserError as exc:
assert exc.failure_cases.shape[0] > 0
CATEGORIES = ["A", "B", "C"]
@given(st.lists(st.sampled_from(CATEGORIES), min_size=5))
def test_pandas_category_dtype(data):
"""Test pandas_engine.Category correctly coerces valid categorical data."""
data = pd.Series(data)
dtype = pandas_engine.Category(CATEGORIES)
coerced_data = dtype.coerce(data)
assert dtype.check(coerced_data.dtype)
for _, value in data.iteritems():
coerced_value = dtype.coerce_value(value)
assert coerced_value in CATEGORIES
@given(st.lists(st.sampled_from(["X", "Y", "Z"]), min_size=5))
def test_pandas_category_dtype_error(data):
"""Test pandas_engine.Category raises TypeErrors on invalid data."""
data = pd.Series(data)
dtype = pandas_engine.Category(CATEGORIES)
with pytest.raises(TypeError):
dtype.coerce(data)
for _, value in data.iteritems():
with pytest.raises(TypeError):
dtype.coerce_value(value)
@given(st.lists(st.sampled_from([1, 0, 1.0, 0.0, True, False]), min_size=5))
def test_pandas_boolean_native_type(data):
"""Test native pandas bool type correctly coerces valid bool-like data."""
data = pd.Series(data)
dtype = pandas_engine.Engine.dtype("boolean")
# the BooleanDtype can't handle Series of non-boolean, mixed dtypes
if data.dtype == "object":
with pytest.raises(TypeError):
dtype.coerce(data)
else:
coerced_data = dtype.coerce(data)
assert dtype.check(coerced_data.dtype)
for _, value in data.iteritems():
dtype.coerce_value(value)
@given(st.lists(st.sampled_from(["A", "True", "False", 5, -1]), min_size=5))
def test_pandas_boolean_native_type_error(data):
"""Test native pandas bool type raises TypeErrors on non-bool-like data."""
data =
|
pd.Series(data)
|
pandas.Series
|
from datetime import datetime
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
# Get Data
confirmed = pd.read_csv(
'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
recovered = pd.read_csv(
'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
death = pd.read_csv(
'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
# Find dates
assert (len(confirmed.columns) == len(death.columns) == len(recovered.columns))
dates = []
for d in confirmed.columns:
try:
datetime.strptime(d, '%m/%d/%y')
dates.append(d)
except ValueError:
continue
# Helper Functions
def updateDF(df, newDF, dates):
for date in dates:
if len(newDF) == 0:
newDF = df.groupby(['Country/Region'])[date].apply(sum).reset_index().copy()
else:
temp = df.groupby(['Country/Region'])[date].apply(sum).reset_index().copy()
newDF = pd.merge(left=newDF, right=temp, on=['Country/Region'], how='left')
return newDF
def getCountryTimeData(df, cn):
return df[df['Country/Region'] == cn].values.flatten()[1:].tolist()
# Process data by country - Aggregating states and provinces into country
newConfirmed = pd.DataFrame()
confirmed = updateDF(df=confirmed, newDF=newConfirmed, dates=dates)
print('*\n')
newDeath = pd.DataFrame()
death = updateDF(df=death, newDF=newDeath, dates=dates)
print('*\n')
newRecovered =
|
pd.DataFrame()
|
pandas.DataFrame
|
from datetime import datetime, timedelta
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestSeriesDtypes:
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range("20130101", periods=3))
result = dt64ser.astype(object)
assert isinstance(result.iloc[0], datetime)
assert result.dtype == np.object_
def test_td64_series_astype_object(self):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
result = tdser.astype(object)
assert isinstance(result.iloc[0], timedelta)
assert result.dtype == np.object_
@pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])
def test_astype(self, dtype):
s = Series(np.random.randn(5), name="foo")
as_typed = s.astype(dtype)
assert as_typed.dtype == dtype
assert as_typed.name == s.name
def test_dtype(self, datetime_series):
assert datetime_series.dtype == np.dtype("float64")
assert datetime_series.dtypes == np.dtype("float64")
@pytest.mark.parametrize("value", [np.nan, np.inf])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_astype_cast_nan_inf_int(self, dtype, value):
# gh-14265: check NaN and inf raise error when converting to int
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
s = Series([value])
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
@pytest.mark.parametrize("dtype", [int, np.int8, np.int64])
def test_astype_cast_object_int_fail(self, dtype):
arr = Series(["car", "house", "tree", "1"])
msg = r"invalid literal for int\(\) with base 10: 'car'"
with pytest.raises(ValueError, match=msg):
arr.astype(dtype)
def test_astype_cast_object_int(self):
arr = Series(["1", "2", "3", "4"], dtype=object)
result = arr.astype(int)
tm.assert_series_equal(result, Series(np.arange(1, 5)))
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
s = Series(date_range("20130101", periods=3, tz="US/Eastern"))
# astype
result = s.astype(object)
expected = Series(s.astype(object), dtype=object)
tm.assert_series_equal(result, expected)
result = Series(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_series_equal(result, s)
# astype - object, preserves on construction
result = Series(s.astype(object))
expected = s.astype(object)
tm.assert_series_equal(result, expected)
# astype - datetime64[ns, tz]
result = Series(s.values).astype("datetime64[ns, US/Eastern]")
tm.assert_series_equal(result, s)
result = Series(s.values).astype(s.dtype)
tm.assert_series_equal(result, s)
result = s.astype("datetime64[ns, CET]")
expected = Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_astype_str_map(self, dtype, series):
# see gh-4405
result = series.astype(dtype)
expected = series.map(str)
tm.assert_series_equal(result, expected)
def test_astype_str_cast_dt64(self):
# see gh-9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series([str("2010-01-04")])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series([str("2010-01-04 00:00:00-05:00")])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see gh-9757
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
expected = Series([str("1 days")])
tm.assert_series_equal(ser, expected)
def test_astype_unicode(self):
# see gh-7758: A bit of magic is required to set
# default encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series(["データーサイエンス、お前はもう死んでいる"]),
]
former_encoding = None
if sys.getdefaultencoding() == "utf-8":
test_series.append(Series(["野菜食べないとやばい".encode("utf-8")]))
for s in test_series:
res = s.astype("unicode")
expec = s.map(str)
tm.assert_series_equal(res, expec)
# Restore the former encoding
if former_encoding is not None and former_encoding != "utf-8":
reload(sys)
sys.setdefaultencoding(former_encoding)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see gh-7271
s = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = s.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = s.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
s.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
s.astype(dt4)
# GH16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
s.astype(dt5)
def test_astype_categories_raises(self):
# deprecated 17636, removed in GH-27141
s = Series(["a", "b", "a"])
with pytest.raises(TypeError, match="got an unexpected"):
s.astype("category", categories=["a", "b"], ordered=True)
def test_astype_from_categorical(self):
items = ["a", "b", "c", "a"]
s = Series(items)
exp = Series(Categorical(items))
res = s.astype("category")
tm.assert_series_equal(res, exp)
items = [1, 2, 3, 1]
s = Series(items)
exp = Series(Categorical(items))
res = s.astype("category")
tm.assert_series_equal(res, exp)
df = DataFrame({"cats": [1, 2, 3, 4, 5, 6], "vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = DataFrame(
{"cats": ["a", "b", "b", "a", "a", "d"], "vals": [1, 2, 3, 4, 5, 6]}
)
cats = Categorical(["a", "b", "b", "a", "a", "d"])
exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
lst = ["a", "b", "c", "a"]
s = Series(lst)
exp = Series(Categorical(lst, ordered=True))
res = s.astype(CategoricalDtype(None, ordered=True))
tm.assert_series_equal(res, exp)
exp = Series(Categorical(lst, categories=list("abcdef"), ordered=True))
res = s.astype(CategoricalDtype(list("abcdef"), ordered=True))
tm.assert_series_equal(res, exp)
def test_astype_categorical_to_other(self):
value = np.random.RandomState(0).randint(0, 10000, 100)
df = DataFrame({"value": value})
labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
s = df["value_group"]
expected = s
tm.assert_series_equal(s.astype("category"), expected)
tm.assert_series_equal(s.astype(CategoricalDtype()), expected)
msg = r"could not convert string to float|invalid literal for float\(\)"
with pytest.raises(ValueError, match=msg):
s.astype("float64")
cat = Series(Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
exp = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
tm.assert_series_equal(cat.astype("str"), exp)
s2 = Series(Categorical(["1", "2", "3", "4"]))
exp2 = Series([1, 2, 3, 4]).astype(int)
tm.assert_series_equal(s2.astype("int"), exp2)
# object don't sort correctly, so just compare that we have the same
# values
def cmp(a, b):
tm.assert_almost_equal(np.sort(np.unique(a)), np.sort(np.unique(b)))
expected = Series(np.array(s.values), name="value_group")
cmp(s.astype("object"), expected)
cmp(s.astype(np.object_), expected)
# array conversion
tm.assert_almost_equal(np.array(s), np.array(s.values))
tm.assert_series_equal(s.astype("category"), s)
tm.assert_series_equal(s.astype(CategoricalDtype()), s)
roundtrip_expected = s.cat.set_categories(
s.cat.categories.sort_values()
).cat.remove_unused_categories()
tm.assert_series_equal(
s.astype("object").astype("category"), roundtrip_expected
)
tm.assert_series_equal(
s.astype("object").astype(CategoricalDtype()), roundtrip_expected
)
# invalid conversion (these are NOT a dtype)
msg = (
"dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "
"not understood"
)
for invalid in [
lambda x: x.astype(Categorical),
lambda x: x.astype("object").astype(Categorical),
]:
with pytest.raises(TypeError, match=msg):
invalid(s)
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("dtype_ordered", [True, False])
@pytest.mark.parametrize("series_ordered", [True, False])
def test_astype_categorical_to_categorical(
self, name, dtype_ordered, series_ordered
):
# GH 10696/18593
s_data = list("abcaacbab")
s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered)
s = Series(s_data, dtype=s_dtype, name=name)
# unspecified categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = s.astype(dtype)
exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered)
expected = Series(s_data, name=name, dtype=exp_dtype)
tm.assert_series_equal(result, expected)
# different categories
dtype = CategoricalDtype(list("adc"), dtype_ordered)
result = s.astype(dtype)
expected = Series(s_data, name=name, dtype=dtype)
tm.assert_series_equal(result, expected)
if dtype_ordered is False:
# not specifying ordered, so only test once
expected = s
result = s.astype("category")
tm.assert_series_equal(result, expected)
def test_astype_bool_missing_to_categorical(self):
# GH-19182
s = Series([True, False, np.nan])
assert s.dtypes == np.object_
result = s.astype(CategoricalDtype(categories=[True, False]))
expected = Series(Categorical([True, False, np.nan], categories=[True, False]))
tm.assert_series_equal(result, expected)
def test_astype_categoricaldtype(self):
s = Series(["a", "b", "a"])
result = s.astype(CategoricalDtype(["a", "b"], ordered=True))
expected = Series(Categorical(["a", "b", "a"], ordered=True))
tm.assert_series_equal(result, expected)
result = s.astype(CategoricalDtype(["a", "b"], ordered=False))
expected = Series(Categorical(["a", "b", "a"], ordered=False))
tm.assert_series_equal(result, expected)
result = s.astype(CategoricalDtype(["a", "b", "c"], ordered=False))
expected = Series(
Categorical(["a", "b", "a"], categories=["a", "b", "c"], ordered=False)
)
tm.assert_series_equal(result, expected)
tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"]))
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_astype_generic_timestamp_no_frequency(self, dtype, request):
# see gh-15524, gh-15987
data = [1]
s = Series(data)
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
request.node.add_marker(mark)
msg = (
fr"The '{dtype.__name__}' dtype has no unit\. "
fr"Please pass in '{dtype.__name__}\[ns\]' instead."
)
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
# see gh-15524
if dtype not in (
"S",
"V", # poor support (if any) currently
"M",
"m", # Generic timestamps raise a ValueError. Already tested.
):
init_empty = Series([], dtype=dtype)
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
as_type_empty = Series([]).astype(dtype)
tm.assert_series_equal(init_empty, as_type_empty)
def test_arg_for_errors_in_astype(self):
# see gh-14878
s = Series([1, 2, 3])
msg = (
r"Expected value of kwarg 'errors' to be one of \['raise', "
r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
s.astype(np.float64, errors=False)
s.astype(np.int8, errors="raise")
def test_intercept_astype_object(self):
series = Series(date_range("1/1/2000", periods=10))
# This test no longer makes sense, as
# Series is by default already M8[ns].
expected = series.astype("object")
df = DataFrame({"a": series, "b": np.random.randn(len(series))})
exp_dtypes = Series(
[np.dtype("datetime64[ns]"), np.dtype("float64")], index=["a", "b"]
)
tm.assert_series_equal(df.dtypes, exp_dtypes)
result = df.values.squeeze()
assert (result[:, 0] == expected.values).all()
df = DataFrame({"a": series, "b": ["foo"] * len(series)})
result = df.values.squeeze()
assert (result[:, 0] == expected.values).all()
def test_series_to_categorical(self):
# see gh-16524: test conversion of Series to Categorical
series = Series(["a", "b", "c"])
result = Series(series, dtype="category")
expected = Series(["a", "b", "c"], dtype="category")
tm.assert_series_equal(result, expected)
def test_infer_objects_series(self):
# GH 11221
actual = Series(np.array([1, 2, 3], dtype="O")).infer_objects()
expected =
|
Series([1, 2, 3])
|
pandas.Series
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 22 11:19:49 2020
Trying to automate everything; if problems arise, go back to version 14.
@author: Devineni
"""
import pandas as pd
import numpy as np
import statistics
from statistics import mean
import time
import datetime as dt
import matplotlib.pyplot as plt
pd.options.plotting.backend = "matplotlib"
import operator # for plotting
from openpyxl import load_workbook
# import mysql.connector
import os
import pymysql
from sqlalchemy import create_engine
from easygui import *
import sys
#from recalibration import clean_sql_reg
def prRed(skk): print("\033[31;1;m {}\033[00m" .format(skk))
def prYellow(skk): print("\033[33;1;m {}\033[00m" .format(skk))
import warnings
warnings.filterwarnings('ignore')
from uncertainties import ufloat
from uncertainties import unumpy
#%% control plot properties
import datetime
import matplotlib.dates as mdates
import matplotlib.units as munits
from pylab import rcParams
rcParams['figure.figsize'] = 7,4.5
plt.rcParams["font.family"] = "calibri"
plt.rcParams["font.weight"] = "normal"
plt.rcParams["font.size"] = 10
#%% Selection Message box
'''
This section deals with taking the user's selection of the experiment.
The easygui module was used to create the dialogue boxes for easy input;
this is just a more visual way of selecting the experiment.
'''
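# Editor's note: easygui.choicebox returns the selected string, or None if the
# dialog is cancelled, so `database` may be None if the box is closed without a choice.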
msg ="Please select a Location/Season you like to analyze"
title = "Season selection"
choices = ["ESHL_summer", "ESHL_winter", "CBo_summer", "CBo_winter"]
database = choicebox(msg, title, choices)
times = pd.read_excel('Times_thesis.xlsx', sheet_name= database)
for experiment in times['short name']:
z = int(times[times['short name'] == experiment].index.values)
Vdot_sheets = {"ESHL_summer":"ESHL_Vdot", "ESHL_winter":"ESHL_Vdot", "CBo_summer":"CBo_Vdot", "CBo_winter":"CBo_Vdot"}
#%%
'''
This code extracts the correct value of the volume flow Vdot (in m3 h-1) required
for the experiment, based on Sven's paper (reference to be updated).
For now this covers only intensive ventilation; it is to be further automated
for the entire selection.
'''
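# Editor's sketch of what follows (not part of the original source): `x` ends up as a
# one-column DataFrame of ufloats indexed by '<Level>_sup' / '<Level>_exh' labels,
# e.g. x.at['Kü_20_sup', 'Vdot'].  `a` and `b` each take the larger of the summed
# supply and exhaust flows of the two alternating device groups, and the imported
# volume flow is their mean, Vdot_imported = (a + b) / 2.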
if Vdot_sheets[database] == "ESHL_Vdot":
Vdot = pd.read_excel("Vdot_results.xlsx", sheet_name = Vdot_sheets[database])
Kü_20_sup = ufloat(Vdot.at[0,'Vdot_sup'], Vdot.at[0,'Vdot_sup_uncertainity'])
d = {}
for i in range(len(Vdot)):
d[Vdot.loc[i,"Level"] + "_sup"] = ufloat(Vdot.at[i,'Vdot_sup'], Vdot.at[i,'Vdot_sup_uncertainity'])
d[Vdot.loc[i,"Level"] + "_exh"] = ufloat(-Vdot.at[i,'Vdot_exh'], Vdot.at[i,'Vdot_exh_uncertainity'])
x = pd.DataFrame(d, index=["Vdot"])
x = x.transpose()
'''
    From the results Excel sheet, the Vdot required for our experiment is calculated below.
'''
i = z
wz = int(times.at[i,'Volume flow (SZ, WZ)'])
bd = int(times.at[i,'Volume flow (BD)'])
ku = int(times.at[i,'Volume flow (Kü)'])
ku_ex = int(times.at[i,'Volume flow (Kü_exhaust)'])
print(times.at[i,'Name'])
    a = max(x.at['SZ02_' + str(wz) + '_sup', 'Vdot']
            + x.at['WZ_' + str(wz) + '_sup', 'Vdot']
            + x.at['BD_' + str(bd) + '_sup', 'Vdot'],
            x.at['SZ01_' + str(wz) + '_exh', 'Vdot']
            + x.at['Kü_' + str(ku) + '_exh', 'Vdot']
            + x.at['BD_' + str(bd) + '_exh', 'Vdot']
            + x.at['Kü_Ex_' + str(ku_ex) + '_exh', 'Vdot'])
    b = max(x.at['SZ02_' + str(wz) + '_exh', 'Vdot']
            + x.at['WZ_' + str(wz) + '_exh', 'Vdot']
            + x.at['BD_' + str(bd) + '_exh', 'Vdot']
            + x.at['Kü_Ex_' + str(ku_ex) + '_exh', 'Vdot'],
            x.at['SZ01_' + str(wz) + '_sup', 'Vdot']
            + x.at['Kü_' + str(ku) + '_sup', 'Vdot']
            + x.at['BD_' + str(bd) + '_sup', 'Vdot'])
Vdot_imported = (a+b)/2
print(f"Vdot = {Vdot_imported}\n")
level = "wz={wz}_ku={ku}_bd={bd}_kuexh={ku_ex}".format(wz=wz,ku=ku,bd=bd,ku_ex=ku_ex)
else:
print(times.at[z,'Name'])
Vdot = pd.read_excel("Vdot_results.xlsx", sheet_name = Vdot_sheets[database])
d = {}
for i in range(len(Vdot)):
d[Vdot.loc[i,"Level"] + "_sup"] = ufloat(Vdot.at[i,'Vdot_sup'], Vdot.at[i,'Vdot_sup_uncertainity'])
d[Vdot.loc[i,"Level"] + "_exh"] = ufloat(-Vdot.at[i,'Vdot_exh'], Vdot.at[i,'Vdot_exh_uncertainity'])
x = pd.DataFrame(d, index=["Vdot"])
x = x.transpose()
sz = times.at[z,'Volume flow (SZ)']
k1 = times.at[z,'Volume flow (K1)']
k2 = times.at[z,'Volume flow (K2)']
ex = int(times.at[z,'Volume flow (BD)'])
x.loc["BD_0_exh", "Vdot"] = ufloat(0,0)
x.loc["BD_100_exh", "Vdot"] = ufloat(47.4,12) # data given from sven uncertainity is selected as average of uncertainities of other sensors
a = max(x.at['K1_'+ str(k1) +'_sup','Vdot'] + x.at['K2_'+ str(k2) +'_sup','Vdot'], x.at['SZ_'+ str(sz) +'_exh','Vdot'] + x.at['BD_'+ str(ex) +'_exh','Vdot']) #
b = max(x.at['K1_'+ str(k1) +'_exh','Vdot'] + x.at['K2_'+ str(k2) +'_exh','Vdot'] + x.at['BD_'+ str(ex) +'_exh','Vdot'], x.at['SZ_'+ str(sz) +'_sup','Vdot'] )
Vdot_imported = (a+b)/2
level = times.loc[z,"short name"]
print(f"Vdot = {Vdot_imported}\n")
#%%
folder_name = times.loc[z,"short name"]
path = folder_name
os.mkdir(path)
#%% Fixed Input Data
"""
###############################################################################
## Input Data ###############################################################
###############################################################################
"""
experiment = z
info = times.loc[[z]]
"""
The time stamp of the initialisation of the measurement has to be set to the
one where the behaviour of the measurement values for the temperature, humidity,
CO2-concentration and flow-speed behind the indoor hood of the ventilation
devices changes significantly. This time stamp has to be at the end of the
homogenisation period.
"""
t0 = times["Start"][experiment]
start, end = str(times["Start"][experiment] - dt.timedelta(minutes=10)), str(times["End"][experiment])
#start, end = str(times["Start"][experiment] - dt.timedelta(minutes=10)), str(times["Start"][experiment] + dt.timedelta(hours=5))
print(database)
print(times["Name"][experiment])
print(times["short name"][experiment])
print("\nThe sensors excluded in this evaluation are:\n{}\n".format(times["exclude"][z]))
"""
Document with the list of all filenames belonging to the current evaluation.
"""
if "summer" in database:
season = 'Summer'
else:
season = 'Winter'
if "ESHL" in database:
"""
Geometry of the indoor volume ESHL
"""
area = (3.005 * 3.2 *2) + (4.650 * 6.417) + (1.620 + 3.817) + (1.5 * 2.4) # #2 times l * b of bedrooms + l * b of living room from Saebu_Containermaße
height = 2.5 # m # not considering the space after the Anhangdecke
blocked_volume = ufloat(11.39, 1.14)
volume = area * height
v = volume - blocked_volume
else:
"""
Geometry of the indoor volume CBo
"""
area = 88.7 # in m²
height = 1.8 + 0.19 + 0.23 # m
blocked_volume = ufloat(12.05, 1.20)
volume = area * height
v = volume - blocked_volume
"""
The ventilation level chosen by the user results in a volume flow provided by
the ventilation system.
"""
Vdot = Vdot_imported
"""
Period time of the alternating ventilation devices
"""
T = 120 # in s
#%%% database details
schema = database.lower() # the schema that you work on
''' this engine is used wherever a connection to the database is required '''
engine = create_engine("mysql+pymysql://wojtek:<EMAIL>/{}".format(schema),pool_pre_ping=True)
#%% Main Class Tau_l
"""
###############################################################################
## Classes ##################################################################
###############################################################################
"""
class Tau_l:
end_runtime, tau_l_list, plot_log_end_list = [], [], []
df_plot_list, plot_delta_list, plot_pd_SMA_list, plot_pd_SMA_end_list = [], [], [], []
plot_log_list, df_clean_list, sec_list, plot_delta_end_list = [], [], [], []
"""
    Calculation and handover of a dataframe with the relevant values for all
    the measurement positions in the flat.
1st) Indices of the dataframe are named after:
- measurement season (W = winter or S = summer)
- ventilation level (e.g. level 5 = L5)
- exhaust device mode (e.g. exhaust device bath L1, kitchen off =
Exbk10, or exhaust device kitchen L3 = Exbk03)
- indoor doors open or closed (open = Do or closed = Dc)
- measurement position (e.g. MP1A)
-> example: S_L5_Exbk10_Do
    2nd) Columns:
        'tau_lav'        = value of the local mean age of air, in h
        'sgm_tau_lav'    = simple uncertainty of tau_lav, in h
        't0'             = timestamp of the initial average tracer-gas
                           concentration, in '%Y-%m-%d %H:%M:%S'
        'dltC0_av'       = initial average offset to the natural background
                           concentration of the tracer-gas over the whole
                           first considered ventilation cycle, in ppm
        'sgm_dltC0_av'   = simple uncertainty of dltC0_av, in ppm, considering:
                           - uncertainty of the natural background concentration
                           - uncertainty of the sensors
                           - standard deviation of the values belonging
                             to the first cycle
        'max_dltC0'      = maximum value of dltC0 (the last ventilation cycle
                           before the decay curve), in ppm
        'min_dltC0'      = minimum value of dltC0 (the last ventilation cycle
                           before the decay curve), in ppm
        'te'             = timestamp of the end of the considered time
                           evaluation interval at dltC0_av*0.37,
                           in '%Y-%m-%d %H:%M:%S'
        'dltCe_av'       = final average offset to the natural background
                           concentration of the tracer-gas over the whole
                           last considered ventilation cycle, in ppm
        'sgm_dltCe_av'   = simple uncertainty of dltCe_av, in ppm, considering:
                           - uncertainty of the natural background concentration
                           - uncertainty of the sensors
                           - standard deviation of the values belonging
                             to the final cycle
        'max_dltCe'      = maximum value of dltCe (values of the last
                           considered ventilation cycle), in ppm
        'min_dltCe'      = minimum value of dltCe (values of the last
                           considered ventilation cycle), in ppm
        'lmb_tailav'     = tail parameter of the approximation integral
                           according to DIN ISO 16000-8, in 1/h
        'sgm_lmb_tailav' = maximum of the propagated uncertainty of the
                           dependent parameters & the standard deviation of
                           the average logarithmic slope acc. to ISO 16000-8,
                           in 1/h
        'max_lmb_tail'   = maximum value of lmb_tail, in 1/h
        'min_lmb_tail'   = minimum value of lmb_tail, in 1/h
"""
import pandas as pd
import numpy as np
# import matplotlib.pyplot as plt
# CLASS OBJECT ATTRIBUTE: same for any instance of this class
# - none jet defined -
def __init__(self, filename,t0 = t0,database = database,z = z, level = level, season = season, v = v, Vdot = Vdot):
# ATTRIBUTES: user defineable
# Grouping attributes to "headers" indicating which type is expected
# strings
self.season = season # measurement season: W = winter or S = summer
self.level = level # number between 1 and 9
self.v = v
self.Vdot = Vdot
'''
exhaust device mode (e.g. exhaust device bath L1, kitchen off =
bk10, or exhaust device kitchen L3 = bk03)
'''
self.filename = filename
'''
Format:
[room/zone: 2x ASCII-single value]_
MP[measurement position indicator: 2x ASCII-single value]_
[sensor identifier: 4x ASCII-single value]_
[initial timestamp: %Y%m%d_%H%M]_
bis_
[finial timestamp: %Y%m%d_%H%M].
[file type: xlsx or csv]
'''
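        # Hypothetical example of the format above (illustration only):
        # 'WZ_MP1A_2cf3_20200722_1000_bis_20200722_1600.xlsx'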
# integers
self.T = 120 # Period time of alternating ventilation devices
self.i = 0 # row number t0_es in self.df_CO2
self.j = 0 # row number tend_es in self.df_CO2
# dictionary
self.tauloc
# pandas.Timestamp('%Y-%m-%d %H:%M:%S')
self.t0 = t0
# pandas.DatetimeIndex Timestamp('%Y-%m-%d %H:%M:%S')
self.tend_es = pd.DatetimeIndex
self.database = database.lower()
self.start = start
self.end = end
self.df_CO2, self.sec = self.clean_sql_reg()
self.Cout = self.aussen()
#%%% Main method
# OPERATIONS/ACTIONS = Methods
# Main-Method
def tauloc(self):
name = (self.season + '_' + self.level + '_' +
self.filename[0:])
self.tau_l = pd.DataFrame(columns=(
'tau_lav',
'sgm_tau_lav',
't0',
'dltC0_av',
'sgm_dltC0_av',
'max_dltC0',
'min_dltC0',
'te',
'dltCe_av',
'sgm_dltCe_av',
'max_dltCe',
'min_dltCe',
'lmb_tailav',
'sgm_lmb_tailav',
'max_lmb_tail',
'min_lmb_tail',
'time_steps',
'row_start',
'row_end'),
index=[name])
self.df_CO2 = self.co2diff()
while not(self.t0 in self.df_CO2.index):
self.t0 = self.t0 + dt.timedelta(seconds=1)
print(self.t0)
self.tau_l['t0'].loc[name] = self.t0
self.C0 = self.c0()
self.tau_l['dltC0_av'].loc[name] = (
self.C0['pd_SMA'].loc[self.t0])
'''
-----------------------------------------------------------------------
FUTURE TASKS:
1st) Uncertainty characteristics of 'dltC0_av'
-----------------------------------------------------------------------
'''
# self.tau_l['sgm_dltC0_av'].loc[name] = (
# self.C0['???'].loc[self.t0_es])
# self.tau_l['max_dltC0'].loc[name] = (
# self.C0['???'].loc[self.t0_es])
# self.tau_l[,'min_dltC0'].loc[name] = (
# self.C0['???'].loc[self.t0_es])
self.Cend = self.cend()
self.i = self.Cend[1]
self.j = self.Cend[2]
self.Cend = self.Cend[0]
self.tend_es = self.Cend.iloc[[0]].index.tolist()[0]
self.tau_l['te'].loc[name] = self.tend_es
self.tau_l['dltCe_av'].loc[name] = (
self.Cend['pd_SMA'].loc[self.tend_es])
'''
-----------------------------------------------------------------------
FUTURE TASKS:
1st) Uncertainty characteristics of 'dltCe_av'
-----------------------------------------------------------------------
'''
# self.tau_l['sgm_dltCe_av'].loc[name] = (
# self.Cend['???'].loc[self.tend_es])
# self.tau_l['max_dltCe'].loc[name] = (
# self.Cend['???'].loc[self.tend_es])
# self.tau_l['min_dltCe'].loc[name] = (
# self.Cend['???'].loc[self.tend_es])
self.lmd_tl = self.lmdtail()
self.tau_l['lmb_tailav'].loc[name] = (
1/(self.lmd_tl['lmb_tailav'].loc[self.filename[0:]]*3600)
)
self.tau_l['sgm_lmb_tailav'].loc[name] = (
1/(self.lmd_tl['sgm_lmb_tailav'].loc[self.filename[0:]]*3600)
)
self.tau_l['max_lmb_tail'].loc[name] = (
1/(self.lmd_tl['max_lmb_tail'].loc[self.filename[0:]]*3600)
)
self.tau_l['min_lmb_tail'].loc[name] = (
1/(self.lmd_tl['min_lmb_tail'].loc[self.filename[0:]]*3600)
)
"""
Approach according to DIN ISO 16000-8:
DIN ISO 2008. DIN ISO 16000-8, Innenraumluftverunreinigungen – Teil 8:
Bestimmung des lokalen Alters der Luft in Gebäuden zur
Charakterisierung der Lüftungsbedingungen (ISO 16000-8:2007).
Berlin. Deutsches Institut für Normung e.V. and International
Organization for Standardization. [equation 3]
"""
k = 0
sumConz = 0
for k in range(1,len(self.df_CO2)):
if (np.sum((self.df_CO2.iloc[[k]].index > self.C0.iloc[[0]].index) &
(self.df_CO2.iloc[[k]].index < self.Cend.iloc[[0]].index))) == 1:
sumConz = sumConz + self.df_CO2['pd_SMA'].iloc[k]
if np.sum(self.df_CO2.iloc[[k]].index >= self.Cend.iloc[[0]].index) == 1:
break
tau_lav = (
1/(3600*self.C0['Delta_in-out_C'].iloc[0])*(
self.sec * ( self.C0['Delta_in-out_C'].iloc[0]/2 +
sumConz +
self.Cend['pd_SMA'].iloc[0]/2
) + (
self.Cend['pd_SMA'].iloc[0]/
self.lmd_tl['lmb_tailav'].iloc[0]
)
)
)
self.tau_l.loc[name, 'tau_lav'] = tau_lav
self.tau_l.loc[name, 'time_steps'] = self.sec
self.tau_l.loc[name, 'row_start'] = self.i
self.tau_l.loc[name, 'row_end'] = self.j
#######################################################################
self.plot_delta = self.co2diff()[['runtime','Delta_in-out_C']]
new_col = [self.filename if col != "runtime" else str(col) for col in self.plot_delta.columns]
self.plot_delta.columns = new_col
self.plot_delta_list.append(self.plot_delta)
self.plot_delta_end_list.append(self.Cend["Delta_in-out_C"].iloc[0])
self.end_runtime.append(self.Cend["runtime"].iloc[0])
self.plot_pd_SMA = self.co2diff()[['runtime','pd_SMA']]
new_col = [self.filename if col != "runtime" else str(col) for col in self.plot_pd_SMA.columns]
self.plot_pd_SMA.columns = new_col
self.plot_pd_SMA_list.append(self.plot_pd_SMA)
self.plot_pd_SMA_end_list.append(self.Cend["pd_SMA"].iloc[0])
self.plot_log = self.co2diff()[['runtime', 'log_value']]
new_col = [ self.filename if col != "runtime" else str(col) for col in self.plot_log.columns]
self.plot_log.columns = new_col
self.plot_log_list.append(self.plot_log)
self.plot_log_end_list.append(self.Cend["log_value"].iloc[0])
#######################################################################
prRed(self.filename)
return self.tau_l
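# Hedged usage sketch (assumed, not from the source): __init__ pulls its
# defaults (t0, database, z, level, season, v, Vdot) from module-level
# globals, and clean_sql_reg() needs the MySQL tables to be reachable, so a
# call would look roughly like the placeholder below:
#   mp = Tau_l('1og_MP1A_0b85_20200825_1200_bis_20200825_1800')
#   result = mp.tauloc()        # one-row DataFrame indexed by the MP name
#   print(result['tau_lav'])    # local mean age of air, in h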
#%%% Sub Methods
# Sub-methods
def clean_sql_reg(self):
self.sensor_name = self.filename
accuracy1 = 50 # from the uncertainty equation for the first testo 450 XL
accuracy2 = 0.02 # ±(50 ppm CO2 ± 2% of mv) (0 to 5000 ppm CO2)
accuracy3 = 50 # the same equation for the second testo 450 XL
accuracy4 = 0.02
accuracy5 = 75 # the same equation for the testo 480
accuracy6 = 0.03 # Citavi Title: Testo AG
'''
The following if-else statement is written to import the right data
for the calibration-offset equation.
There are two time periods where calibration was done, and this determines
which calibration database is used.
'''
if (self.database == "cbo_summer") or (self.database == "cbo_winter") or (self.database == "eshl_winter"):
engine = create_engine("mysql+pymysql://wojtek:Password#[email protected]/{}".format("cbo_calibration"),pool_pre_ping=True)
# engine = create_engine("mysql+pymysql://root:@172.16.58.3/{}".format("cbo_calibration"),pool_pre_ping=True)
elif self.database == "eshl_summer":
engine = create_engine("mysql+pymysql://wojtek:Password#<EMAIL>/{}".format("eshl_calibration"),pool_pre_ping=True)
# engine = create_engine("mysql+pymysql://root:@172.16.58.3/{}".format("eshl_calibration"),pool_pre_ping=True)
else:
print("Please select a correct database")
'''standard syntax to import SQL data as a dataframe;
engine1 is the measurement-campaign experimental data and engine is the calibration data'''
engine1 = create_engine("mysql+pymysql://wojtek:Password#102@<EMAIL>.mysql.database.<EMAIL>/{}".format(self.database),pool_pre_ping=True)
'''Calibration data is imported '''
reg_result = pd.read_sql_table("reg_result", con = engine).drop("index", axis = 1)
'''Calibration data for the particular sensor alone is filtered '''
res = reg_result[reg_result['sensor'].str.lower() == self.sensor_name].reset_index(drop = True)
'''This filters the HOBOs from the testos. The HOBOs will have a res entry; the testos
will not, because they don't have an experimental calibration offset'''
if res.shape[0] == 1:
''' The imported SQL data is cleaned and columns are renamed to suit our calculation'''
self.sensor_df = pd.read_sql_query("SELECT * FROM {}.{} WHERE datetime BETWEEN '{}' AND '{}'".format(self.database, self.sensor_name, self.start, self.end) , engine1).drop('index', axis =1)
self.sensor_df['CO2_ppm_reg'] = self.sensor_df.eval(res.loc[0, "equation"])
self.sensor_df = self.sensor_df.rename(columns = {'CO2_ppm':'CO2_ppm_original', 'CO2_ppm_reg': 'C_CO2 in ppm'})
self.sensor_df = self.sensor_df.drop_duplicates(subset=['datetime'])
self.sensor_df = self.sensor_df.loc[:, ["datetime", "C_CO2 in ppm", "CO2_ppm_original"]]
self.sensor_df = self.sensor_df.dropna()
'''This is the absolute uncertainty at each point of measurement, saved in the
dataframe at each timestamp. Ref: equation D2 in DIN ISO 16000-8:2008-12'''
'''For ESHL summer we would ideally take the mean of all three sensors and also propagate
the uncertainties of all three testo sensors. This is not done here at the moment,
but to get the largest possible uncertainty we propagate the uncertainty first.'''
# Why RSE ? https://stats.stackexchange.com/questions/204238/why-divide-rss-by-n-2-to-get-rse
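# The line below implements that propagation: the squared relative (2 %, 2 %,
# 3 % of the reading) and absolute (50, 50, 75 ppm) accuracies of the three
# testo sensors are summed with the squared regression RSE under one square
# root, i.e. s_meas = sqrt(sum of squared single uncertainties), as in eq. D2.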
self.sensor_df["s_meas"] = np.sqrt(np.square((self.sensor_df["C_CO2 in ppm"] * accuracy2)) + np.square(accuracy1) + np.square((self.sensor_df["C_CO2 in ppm"] * accuracy4)) + np.square(accuracy3) + np.square((self.sensor_df["C_CO2 in ppm"] * accuracy6)) + np.square(accuracy5)+ np.square(res.loc[0, "rse"]))
# The measurement uncertainty certainly depends to some extent on the concentration range. DIN ISO 16000-8:2008-12 (page 36)
x = self.sensor_df["datetime"][2] - self.sensor_df["datetime"][1]
self.sec = int(x.total_seconds())
"""
Creating a runtime column with t0 as 0 or centre of the time axes
"""
t0_cd = self.sensor_df['datetime'].loc[0]
while not(self.t0 in self.sensor_df["datetime"].to_list()):
self.t0 = self.t0 + dt.timedelta(seconds=1)
print(self.t0)
dtl_t0 = (self.t0 - t0_cd)//dt.timedelta(seconds=1)
"""
Calculates the elapsed time stored in the array x as an integer number of seconds
"""
endpoint = len(self.sensor_df) * self.sec - dtl_t0
"""
Creates an array from -dtl_t0 up to endpoint with step size sec.
"""
x = np.arange(-dtl_t0,endpoint,self.sec)
self.sensor_df['runtime'] = x
self.sensor_df2 = self.sensor_df.set_index('datetime')
self.rhg = pd.date_range(self.sensor_df2.index[0], self.sensor_df2.index[-1], freq=str(self.sec)+'S')
self.sensor_df = self.sensor_df2.reindex(self.rhg).interpolate()
return self.sensor_df, self.sec
else:
'''The steps performed above are simply repeated below for the testo sensors
'''
self.sensor_df = pd.read_sql_query("SELECT * FROM {}.{} WHERE datetime BETWEEN '{}' AND '{}'".format(self.database, self.sensor_name, self.start, self.end) , engine) # selects a table from a database according to table name
self.sensor_df = self.sensor_df.drop_duplicates(subset=['datetime'])
self.sensor_df = self.sensor_df.loc[:, ["datetime", "CO2_ppm"]]
self.sensor_df = self.sensor_df.rename(columns = {"CO2_ppm":"C_CO2 in ppm"})
self.sensor_df["s_meas"] = np.sqrt(np.square((self.sensor_df["C_CO2 in ppm"] * accuracy2)) + np.square(accuracy1) + np.square((self.sensor_df["C_CO2 in ppm"] * accuracy4)) + np.square(accuracy3) + np.square((self.sensor_df["C_CO2 in ppm"] * accuracy6)) + np.square(accuracy5))
self.sensor_df = self.sensor_df.dropna()
x = self.sensor_df["datetime"][2] - self.sensor_df["datetime"][1]
self.sec = int(x.total_seconds())
"""
Creating a runtime column with t0 as 0 or centre of the time axes
"""
t0_cd = self.sensor_df['datetime'].loc[0]
while not(self.t0 in self.sensor_df["datetime"].to_list()):
self.t0 = self.t0 + dt.timedelta(seconds=1)
print(self.t0)
dtl_t0 = (self.t0 - t0_cd)//dt.timedelta(seconds=1)
"""
Calculates the elapsed time stored in the array x as an integer number of seconds
"""
endpoint = len(self.sensor_df) * self.sec - dtl_t0
"""
Creates an array from -dtl_t0 up to endpoint with step size sec.
"""
x = np.arange(-dtl_t0,endpoint,self.sec)
self.sensor_df['runtime'] = x
self.sensor_df2 = self.sensor_df.set_index('datetime')
self.rhg = pd.date_range(self.sensor_df2.index[0], self.sensor_df2.index[-1], freq=str(self.sec)+'S')
self.sensor_df = self.sensor_df2.reindex(self.rhg).interpolate()
return self.sensor_df, self.sec
def aussen(self):
if self.database == "cbo_summer":
self.Cout = {'meanCO2': 445.1524174626867,
'sgm_CO2': 113.06109664245112,
'maxCO2': 514.3716999999999,
'minCO2': 373.21639999999996}
self.cout_mean, self.cout_max, self.cout_min = 445.1524174626867, 514.3716999999999, 373.21639999999996
return self.Cout
else:
accuracy1 = 50 # from the uncertainty equation for the first testo 450 XL
accuracy2 = 0.02 # ±(50 ppm CO2 ± 2% of mv) (0 to 5000 ppm CO2)
accuracy3 = 50 # the same equation for the second testo 450 XL
accuracy4 = 0.02
accuracy5 = 75 # the same equation for the testo 480
accuracy6 = 0.03 # Citavi Title: Testo AG
'''
The following if-else statement is written to import the right data
for the calibration-offset equation.
There are two time periods where calibration was done, and this determines
which calibration database is used.
'''
self.database = self.database.lower()
if (self.database == "cbo_summer") or (self.database == "cbo_winter") or (self.database == "eshl_winter"):
engine = create_engine("mysql+pymysql://wojtek:Password#<EMAIL>/{}".format("cbo_calibration"),pool_pre_ping=True)
# engine = create_engine("mysql+pymysql://root:@172.16.58.3/{}".format("cbo_calibration"),pool_pre_ping=True)
elif self.database == "eshl_summer":
engine = create_engine("mysql+pymysql://wojtek:Password#<EMAIL>/{}".format("eshl_calibration"),pool_pre_ping=True)
# engine = create_engine("mysql+pymysql://root:@172.16.58.3/{}".format("eshl_calibration"),pool_pre_ping=True)
else:
print("Please select a correct database")
'''standard syntax to import SQL data as a dataframe;
engine1 is the measurement-campaign experimental data and engine is the calibration data'''
engine1 = create_engine("mysql+pymysql://wojtek:Password<EMAIL>/{}".format(self.database),pool_pre_ping=True)
'''Calibration data is imported '''
reg_result = pd.read_sql_table("reg_result", con = engine).drop("index", axis = 1)
'''Calibration data for the particular sensor alone is filtered '''
res = reg_result[reg_result['sensor'].str.lower() == "außen"].reset_index(drop = True)
'''This filters the HOBOs from the testos. The HOBOs will have a res entry; the testos
will not, because they don't have an experimental calibration offset'''
if res.shape[0] == 1:
''' The imported SQL data is cleaned and columns are renamed to suit our calculation'''
self.sensor_df3 = pd.read_sql_query("SELECT * FROM {}.{} WHERE datetime BETWEEN '{}' AND '{}'".format(self.database, "außen", self.start, self.end) , engine1).drop('index', axis =1)
self.sensor_df3['CO2_ppm_reg'] = self.sensor_df3.eval(res.loc[0, "equation"])
self.sensor_df3 = self.sensor_df3.rename(columns = {'CO2_ppm':'CO2_ppm_original', 'CO2_ppm_reg': 'C_CO2 in ppm'})
self.sensor_df3 = self.sensor_df3.drop_duplicates(subset=['datetime'])
self.sensor_df3 = self.sensor_df3.loc[:, ["datetime", "C_CO2 in ppm", "CO2_ppm_original"]]
self.sensor_df3 = self.sensor_df3.dropna()
'''This is the absolute uncertainty at each point of measurement, saved in the
dataframe at each timestamp. Ref: equation D2 in DIN ISO 16000-8:2008-12'''
'''For ESHL summer we would ideally take the mean of all three sensors and also propagate
the uncertainties of all three testo sensors. This is not done here at the moment,
but to get the largest possible uncertainty we propagate the uncertainty first.'''
# Why RSE ? https://stats.stackexchange.com/questions/204238/why-divide-rss-by-n-2-to-get-rse
self.sensor_df3["s_meas"] = np.sqrt(np.square((self.sensor_df3["C_CO2 in ppm"] * accuracy2)) + np.square(accuracy1) + np.square((self.sensor_df3["C_CO2 in ppm"] * accuracy4)) + np.square(accuracy3) + np.square((self.sensor_df3["C_CO2 in ppm"] * accuracy6)) + np.square(accuracy5)+ np.square(res.loc[0, "rse"]))
# The measurement uncertainty certainly depends to some extent on the concentration range. DIN ISO 16000-8:2008-12 (page 36)
x = self.sensor_df3["datetime"][2] - self.sensor_df3["datetime"][1]
self.sec3 = int(x.total_seconds())
"""
Creating a runtime column with t0 as 0 or centre of the time axes
"""
t0_cd = self.sensor_df3['datetime'].loc[0]
while not(self.t0 in self.sensor_df3["datetime"].to_list()):
self.t0 = self.t0 + dt.timedelta(seconds=1)
print(self.t0)
dtl_t0 = (self.t0 - t0_cd)//dt.timedelta(seconds=1)
"""
Calculates the elapsed time stored in the array x as an integer number of seconds
"""
endpoint = len(self.sensor_df3) * self.sec3 - dtl_t0
"""
Creates an array from -dtl_t0 up to endpoint with step size sec3.
"""
x = np.arange(-dtl_t0,endpoint,self.sec3)
self.sensor_df3['runtime'] = x
self.sensor_df2 = self.sensor_df3.set_index('datetime')
self.rhg = pd.date_range(self.sensor_df2.index[0], self.sensor_df2.index[-1], freq=str(self.sec3)+'S')
self.au_mean = self.sensor_df2.reindex(self.rhg).interpolate()
self.au_mean['C_CO2 in ppm_out'] = self.au_mean['C_CO2 in ppm']
self.cout_max = self.au_mean['C_CO2 in ppm_out'].max()
self.cout_min = self.au_mean['C_CO2 in ppm_out'].min()
self.cout_mean = self.au_mean['C_CO2 in ppm_out'].mean()
"""
The default value (499±97)ppm (kp=2) has been calculated as the average CO2-
concentration of the available outdoor measurement data in
...\CO2-concentration_outdoor\.
However, the value should be set as a list of data points for the natural
outdoor concentration for a time interval covering the measurement interval.
In the future it would be great to have a dataframe with CO2-concentrations for
corresponding time stamps.
"""
self.Cout = {'meanCO2':self.cout_mean,
'sgm_CO2':self.au_mean["s_meas"].mean(), # More clarification needed on uncertainty
'maxCO2':self.cout_max,
'minCO2':self.cout_min}
return self.Cout
"""
If nothing else is passed, this calculates the difference of the
measured CO₂-concentration to the average outdoor CO₂-concentration,
where the outdoor CO₂-concentration is by default set to (499±97) ppm (kp=1).
This value has been calculated as the average CO₂-concentration of the
available outdoor measurement data in ...\CO2-concentration_outdoor\.
"""
def co2diff(self):
import numpy as np
# Calculates the difference of the measured to the background/outdoor concentration
'''
-----------------------------------------------------------------------
FUTURE TASKS:
1st) Check whether a data frame of outdoor CO₂-concentration for
several time stamps within the considered time interval has
been passed. If "True" than use those concentrations instead.
2nd) Calculate the uncertainty- and range-measures of the
'Delta_in-out_C'-values.
-----------------------------------------------------------------------
'''
# self.df_CO2['Delta_in-out_C'] = (self.df_CO2['C_CO2 in ppm'] -
# self.Cout['meanCO2'])
self.df_CO2['Delta_in-out_C']=self.df_CO2['C_CO2 in ppm'].subtract(self.Cout['meanCO2'])
# Moving average of measured data covering a time interval of T
'''
-----------------------------------------------------------------------
FUTURE TASKS:
1st) Calculate the uncertainty- (standard deviation) and range-
measures of the 'pd_SMA'-values over the time interval of T
-----------------------------------------------------------------------
'''
self.df_CO2['pd_SMA'] = self.df_CO2['Delta_in-out_C'].rolling(
window=((self.T//self.sec)+1)
).mean()
# Natural logarithm of the moving average
'''
-----------------------------------------------------------------------
FUTURE TASKS:
1st) Calculate the uncertainty- and range-
measures of the 'log_value'-values over the time interval of T
-----------------------------------------------------------------------
'''
self.df_CO2['log_value'] = np.log(self.df_CO2['pd_SMA'])
return self.df_CO2
"""
Calculates the initial concentration of the decay curve as the average-value
of the last ventilation cycle (T=120 s) of the ventilation devices.
The value is saved together with the last equally spaced timestamp t0_es and
runtime value.
---------------------------------------------------------------------------
FUTURE TASKS:
1st) Include a check whether the data frame df_CO2 has already been
supplemented by the column 'Delta_in-out_C'. In case of "False"
co2diff(self) should be performed.
---------------------------------------------------------------------------
"""
def c0(self):
self.df_C0 = self.df_CO2.truncate(
before=(pd.to_datetime(self.t0,format = '%Y-%m-%d %H:%M:%S') -
|
pd.to_timedelta(120, unit='s')
|
pandas.to_timedelta
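# ---------------------------------------------------------------------------
# Hedged sketch (not part of the dataset row above): a minimal, self-contained
# illustration of the DIN ISO 16000-8 style calculation the Tau_l.tauloc prompt
# implements -- trapezoidal integration of the concentration-offset decay
# between t0 and te plus the exponential tail term Ce/lambda, normalised by the
# initial offset C0. All numbers below are made-up toy values.
import numpy as np

sec = 10.0                                                   # sampling interval in s (assumed)
c = np.array([600.0, 540.0, 485.0, 436.0, 392.0, 353.0])     # ppm offsets of the decay curve (toy)
lmb_tail = 2.5e-4                                            # tail slope in 1/s (toy)

c0, ce = c[0], c[-1]
trapezoid = sec * (c0 / 2 + c[1:-1].sum() + ce / 2)          # approximate integral, ppm*s
tau_lav_h = (trapezoid + ce / lmb_tail) / (c0 * 3600)        # local mean age of air, in h
print(round(tau_lav_h, 2))
# ---------------------------------------------------------------------------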
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
# fillna returns a new Series, so the result must be assigned back
df_2010_2011['prcab'] = df_2010_2011['prcab'].fillna(0)
df_2012_2013['prcab'] = df_2012_2013['prcab'].fillna(0)
df_2014_2015['prcab'] = df_2014_2015['prcab'].fillna(0)
df_2016_2017['prcab'] = df_2016_2017['prcab'].fillna(0)
df_2018_2019['prcab'] = df_2018_2019['prcab'].fillna(0)
print(df_2018_2019['prcab'])
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
avg_siteid = pd.DataFrame()
avg_surgid = pd.DataFrame()
# #tmpHilla=df_2018_2019.columns
# tmpHilla=pd.DataFrame(df_2018_2019.columns.values.tolist())
# tmpHilla.to_csv("/tmp/pycharm_project_355/columns.csv")
# my_list = df_2010_2011.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2012_2013.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2014_2015.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2016_2017.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2018_2019.columns.values.tolist()
# print (my_list)
# print()
#-------------------merge all csv--------------------------
# dfMerge1 = pd.merge(df_2010_2011, df_2012_2013, on='surgorder')
# dfMerge2 = pd.merge(dfMerge1, df_2014_2015, on='surgorder')
# dfMerge = pd.merge(dfMerge2, df_2016_2017, on='surgorder')
#dfMerge = pd.merge(df_2010_2011, df_2012_2013, on='SiteID')
#count distinc
#table.groupby('YEARMONTH').CLIENTCODE.nunique()
def groupby_siteid():
df_2010 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='siteid')
df2 =pd.merge(df1, df_2012, on='siteid')
df3 =pd.merge(df2, df_2013, on='siteid')
df4 =pd.merge(df3, df_2014, on='siteid')
df5 =pd.merge(df4, df_2015, on='siteid')
df6 =pd.merge(df5, df_2016, on='siteid')
df7 =pd.merge(df6, df_2017, on='siteid')
df8 =pd.merge(df7, df_2018, on='siteid')
df_sum_all_Years =pd.merge(df8, df_2019, on='siteid')
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("total op sum all years siteid.csv")
print("details on site id dist:")
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("total op less 10 years siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_siteid['siteid'] = df_sum_all_Years['siteid']
avg_siteid['total_year_avg'] = df_sum_all_Years['Year_avg']
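# Hedged alternative sketch (toy data, not the STS frames loaded above): the
# per-site, per-year counts that groupby_siteid() builds from ten groupbys and
# nine merges can also be obtained in one call with pd.crosstab.
def _crosstab_demo():
    toy = pd.DataFrame({'siteid': [1, 1, 2, 2, 2],
                        'surgyear': [2010, 2011, 2010, 2010, 2012]})
    counts = pd.crosstab(toy['siteid'], toy['surgyear'])      # one column per year
    counts['Distinct_years'] = counts.gt(0).sum(axis=1)
    counts['Year_avg'] = (counts.drop(columns='Distinct_years').sum(axis=1)
                          / counts['Distinct_years'])
    return counts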
def groupby_surgid():
df_2010 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='surgid')
df2 =pd.merge(df1, df_2012, on='surgid')
df3 =pd.merge(df2, df_2013, on='surgid')
df4 =pd.merge(df3, df_2014, on='surgid')
df5 =pd.merge(df4, df_2015, on='surgid')
df6 =pd.merge(df5, df_2016, on='surgid')
df7 =pd.merge(df6, df_2017, on='surgid')
df8 =pd.merge(df7, df_2018, on='surgid')
df_sum_all_Years =pd.merge(df8, df_2019, on='surgid')
cols = df_sum_all_Years.columns.difference(['surgid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['surgid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years surgid.csv")
print()
print("details of surgid dist:")
print("num of all surgid: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years surgid.csv")
print("num of doctors with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_surgid['surgid'] = df_sum_all_Years['surgid']
avg_surgid['total_year_avg'] = df_sum_all_Years['Year_avg']
def groupby_hospid():
df_2010 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='hospid')
df2 =pd.merge(df1, df_2012, on='hospid')
df3 =pd.merge(df2, df_2013, on='hospid')
df4 =pd.merge(df3, df_2014, on='hospid')
df5 =pd.merge(df4, df_2015, on='hospid')
df6 =pd.merge(df5, df_2016, on='hospid')
df7 =pd.merge(df6, df_2017, on='hospid')
df8 =pd.merge(df7, df_2018, on='hospid')
df_sum_all_Years =pd.merge(df8, df_2019, on='hospid')
cols = df_sum_all_Years.columns.difference(['hospid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['hospid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years hospid.csv")
print(df_sum_all_Years)
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years hospid.csv")
print("num of hospital with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
return df_sum_all_Years
def draw_hist(data,num_of_bins,title,x_title,y_title,color):
plt.hist(data, bins=num_of_bins, color=color,ec="black")
plt.title(title)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.show()
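# Hedged usage sketch for draw_hist (toy data, not the STS frames): e.g. to
# eyeball how many distinct years each site contributes.
def _draw_hist_demo():
    toy_distinct_years = np.random.randint(1, 11, size=200)   # fake per-site year counts
    draw_hist(toy_distinct_years, 10, 'Distinct years per site (toy data)',
              'Distinct years', 'Number of sites', 'skyblue')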
def group_by_count(group_by_value,name):
df_2010_2011_gb = df_2010_2011.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2012_2013_gb = df_2012_2013.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2014_2015_gb = df_2014_2015.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2016_2017_gb = df_2016_2017.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2018_2019_gb = df_2018_2019.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_merge_1=pd.merge(df_2010_2011_gb,df_2012_2013_gb, on=group_by_value)
df_merge_2=pd.merge(df_merge_1,df_2014_2015_gb, on=group_by_value)
df_merge_3=pd.merge(df_merge_2,df_2016_2017_gb, on=group_by_value)
df_merge_4=pd.merge(df_merge_3,df_2018_2019_gb, on=group_by_value)
cols = df_merge_4.columns.difference([group_by_value])
df_merge_4[name] = df_merge_4.loc[:,cols].sum(axis=1)
df_new=pd.DataFrame()
df_new[group_by_value] = df_merge_4[group_by_value]
df_new[name] = df_merge_4[name]
return df_new
def groupby_siteid_prcab():
df2010 = df_2010.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
df1 = pd.merge(df2010, df2011, on='siteid')
df2 = pd.merge(df1, df2012, on='siteid')
df3 = pd.merge(df2, df2013, on='siteid')
df4 = pd.merge(df3, df2014, on='siteid')
df5 = pd.merge(df4, df2015, on='siteid')
df6 = pd.merge(df5, df2016, on='siteid')
df7 = pd.merge(df6, df2017, on='siteid')
df8 = pd.merge(df7, df2018, on='siteid')
df_sum_all_Years = pd.merge(df8, df2019, on='siteid')
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid', 'Distinct_years_reop'])
df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / df_sum_all_Years['Distinct_years_reop']
df_sum_all_Years.to_csv("sum all years siteid reop.csv")
less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years_reop'] != 10]
less_8.to_csv("less 10 years reop siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years_reop'])
print(np.unique(x))
df_10 = df_2010.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2010_Firstop')
df_11 = df_2011.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2011_Firstop')
df_12 = df_2012.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2012_Firstop')
df_13 = df_2013.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2013_Firstop')
df_14 = df_2014.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2014_Firstop')
df_15 = df_2015.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2015_Firstop')
df_16 = df_2016.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2016_Firstop')
df_17 = df_2017.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2017_Firstop')
df_18 = df_2018.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2018_Firstop')
df_19 = df_2019.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2019_Firstop')
d1 = pd.merge(df_10, df_11, on='siteid')
d2 = pd.merge(d1, df_12, on='siteid')
d3 = pd.merge(d2, df_13, on='siteid')
d4 = pd.merge(d3, df_14, on='siteid')
d5 = pd.merge(d4, df_15, on='siteid')
d6 = pd.merge(d5, df_16, on='siteid')
d7 = pd.merge(d6, df_17, on='siteid')
d8 = pd.merge(d7, df_18, on='siteid')
df_sum_all_Years_total = pd.merge(d8, df_19, on='siteid')
cols = df_sum_all_Years_total.columns.difference(['siteid'])
df_sum_all_Years_total['Distinct_years'] = df_sum_all_Years_total[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years_total.columns.difference(['siteid', 'Distinct_years'])
df_sum_all_Years_total['Year_sum'] = df_sum_all_Years_total.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years_total['Year_avg'] = df_sum_all_Years_total['Year_sum'] / df_sum_all_Years_total['Distinct_years']
df_sum_all_Years_total.to_csv("First op sum all years siteid.csv")
# df_sum_all_Years.to_csv("sum all years siteid.csv")
# print(df_sum_all_Years)
# print("num of all sites: ", len(df_sum_all_Years))
#
less = df_sum_all_Years_total[df_sum_all_Years_total['Distinct_years'] != 10]
less.to_csv("First op less 10 years siteid.csv")
print("First op num of sites with less years: ", len(less))
x = np.array(less['Distinct_years'])
print(np.unique(x))
temp_first = pd.DataFrame()
temp_first['siteid'] = df_sum_all_Years_total['siteid']
temp_first['Year_avg_Firstop'] = df_sum_all_Years_total['Year_avg']
temp_reop = pd.DataFrame()
temp_reop['siteid'] = df_sum_all_Years['siteid']
temp_reop['Year_avg_reop'] = df_sum_all_Years['Year_avg_reop']
df20 = pd.merge(avg_siteid, temp_first, on='siteid', how='left')
total_avg_site_id = pd.merge(df20, temp_reop,on='siteid', how='left' )
total_avg_site_id['firstop/total'] = (total_avg_site_id['Year_avg_Firstop']/total_avg_site_id['total_year_avg'])*100
total_avg_site_id['reop/total'] = (total_avg_site_id['Year_avg_reop']/total_avg_site_id['total_year_avg'])*100
total_avg_site_id.to_csv('total_avg_site_id.csv')
# avg_siteid['Year_avg_Firstop'] = df_sum_all_Years_total['Year_avg']
# avg_siteid['Year_avg_reop'] = df_sum_all_Years['Year_avg_reop']
def groupby_surgid_prcab():
df2010 = df_2010.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('surgid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
df1 = pd.merge(df2010, df2011, on='surgid')
df2 = pd.merge(df1, df2012, on='surgid')
df3 = pd.merge(df2, df2013, on='surgid')
df4 = pd.merge(df3, df2014, on='surgid')
df5 = pd.merge(df4, df2015, on='surgid')
df6 = pd.merge(df5, df2016, on='surgid')
df7 = pd.merge(df6, df2017, on='surgid')
df8 = pd.merge(df7, df2018, on='surgid')
df_sum_all_Years = pd.merge(df8, df2019, on='surgid')
cols = df_sum_all_Years.columns.difference(['surgid'])
df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['surgid', 'Distinct_years_reop'])
df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / df_sum_all_Years['Distinct_years_reop']
df_sum_all_Years.to_csv("sum all years surgid reop.csv")
less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years_reop'] != 10]
less_8.to_csv("less 10 years reop surgid.csv")
print("num of surgid with less years: ", len(less_8))
x = np.array(less_8['Distinct_years_reop'])
print(np.unique(x))
df_10 = df_2010.groupby('surgid')['prcab'].apply(lambda x: ((x==2) | (x==0)).sum()).reset_index(name='2010_Firstop')
df_11 = df_2011.groupby('surgid')['prcab'].apply(lambda x: ((x==2) | (x==0)).sum()).reset_index(name='2011_Firstop')
df_12 = df_2012.groupby('surgid')['prcab'].apply(lambda x: ((x==2) | (x==0)).sum()).reset_index(name='2012_Firstop')
df_13 = df_2013.groupby('surgid')['prcab'].apply(lambda x: ((x==2) | (x==0)).sum()).reset_index(name='2013_Firstop')
df_14 = df_2014.groupby('surgid')['prcab'].apply(lambda x: ((x==2) | (x==0)).sum()).reset_index(name='2014_Firstop')
df_15 = df_2015.groupby('surgid')['prcab'].apply(lambda x: ((x==2) | (x==0)).sum()).reset_index(name='2015_Firstop')
df_16 = df_2016.groupby('surgid')['prcab'].apply(lambda x: ((x==2) | (x==0)).sum()).reset_index(name='2016_Firstop')
df_17 = df_2017.groupby('surgid')['prcab'].apply(lambda x: ((x==2) | (x==0)).sum()).reset_index(name='2017_Firstop')
df_18 = df_2018.groupby('surgid')['prcab'].apply(lambda x: ((x==2) | (x==0)).sum()).reset_index(name='2018_Firstop')
df_19 = df_2019.groupby('surgid')['prcab'].apply(lambda x: ((x==2) | (x==0)).sum()).reset_index(name='2019_Firstop')
print(df_18)
d1 = pd.merge(df_10, df_11, on='surgid')
d2 = pd.merge(d1, df_12, on='surgid')
d3 = pd.merge(d2, df_13, on='surgid')
d4 = pd.merge(d3, df_14, on='surgid')
d5 = pd.merge(d4, df_15, on='surgid')
d6 = pd.merge(d5, df_16, on='surgid')
d7 = pd.merge(d6, df_17, on='surgid')
d8 = pd.merge(d7, df_18, on='surgid')
df_sum_all_Years_total =
|
pd.merge(d8, df_19, on='surgid')
|
pandas.merge
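# ---------------------------------------------------------------------------
# Hedged aside (not part of the dataset row above): the prompt chains nine
# pd.merge calls by hand; functools.reduce expresses the same left-to-right
# merge over a list of frames. The toy frames below stand in for the per-year
# groupby results.
import pandas as pd
from functools import reduce

frames = [pd.DataFrame({'surgid': [1, 2], str(year): [year - 2009, 0]})
          for year in range(2010, 2020)]
merged = reduce(lambda left, right: pd.merge(left, right, on='surgid'), frames)
print(merged.shape)   # (2, 11): 'surgid' plus one column per year
# ---------------------------------------------------------------------------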
|
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameIsIn:
def test_isin(self):
# GH#4211
df = DataFrame(
{
"vals": [1, 2, 3, 4],
"ids": ["a", "b", "f", "n"],
"ids2": ["a", "n", "c", "n"],
},
index=["foo", "bar", "baz", "qux"],
)
other = ["a", "b", "c"]
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
def test_isin_empty(self, empty):
# GH#16991
df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
d = {"A": ["a"]}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, "A"] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
df.columns = ["A", "A"]
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, "A"] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH#4763
df = DataFrame(
{
"vals": [1, 2, 3, 4],
"ids": ["a", "b", "f", "n"],
"ids2": ["a", "n", "c", "n"],
},
index=["foo", "bar", "baz", "qux"],
)
with pytest.raises(TypeError):
df.isin("a")
with pytest.raises(TypeError):
df.isin("aaa")
def test_isin_df(self):
df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
df2 = DataFrame({"A": [0, 2, 12, 4], "B": [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected["A"].loc[[1, 3]] = True
expected["B"].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ["A", "C"]
result = df1.isin(df2)
expected["B"] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH#16394
df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "f"]})
df["C"] = list(zip(df["A"], df["B"]))
result = df["C"].isin([(1, "a")])
tm.assert_series_equal(result, Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 =
|
DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
|
pandas.DataFrame
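# ---------------------------------------------------------------------------
# Hedged aside (not part of the dataset row above): a small reminder of the
# DataFrame.isin semantics the tests above exercise -- a list is matched
# against every element, a dict is matched per column, and another DataFrame
# is matched element-wise on aligned index/column labels.
import pandas as pd

df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "c"]})
print(df.isin([1, "b"]))                  # membership test on every element
print(df.isin({"A": [1, 3]}))             # column "B" is all False
print(df.isin(pd.DataFrame({"A": [1, 0, 3], "B": ["x", "b", "c"]})))  # aligned comparison
# ---------------------------------------------------------------------------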
|
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
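# length_error: a label value points past the end of its level;
# label_error: the per-level label arrays have different lengths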
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = ((['foo'], ['bar']))
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
# Levels should be (at least, shallow copied)
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
# It doesn't matter how labels were copied, as long as they're not the same object
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
# Likewise for names: equal, but not the same object
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp(
'2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp(
'2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# requesting more levels than exist should raise IndexError
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format change
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent: duplicate (major, minor) label pairs make the index non-unique
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# difference with a non-MultiIndex (array of tuples) works too
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# errors='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / errors='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME: data types change to float because of intermediate NaN insertion
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'), 121),
(u('x'), u('out'), u('z'), 31, u('y'), u('in'), u('z'), 126),
(u('x'), u('out'), u('z'), 32, u('y'), u('in'), u('z'), 155),
(u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123),
(u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)]
index = pd.MultiIndex.from_tuples(t)
assert not index.has_duplicates
# handle int64 overflow if possible
def check(nlevels, with_nulls):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
if with_nulls: # inject some null values
labels[500] = -1 # common nan value
labels = [labels.copy() for i in range(nlevels)]
for i in range(nlevels):
labels[i][500 + i - nlevels // 2] = -1
labels += [np.array([-1, 1]).repeat(500)]
else:
labels = [labels] * nlevels + [np.arange(2).repeat(500)]
levels = [level] * nlevels + [[0, 1]]
# no dups
index = MultiIndex(levels=levels, labels=labels)
assert not index.has_duplicates
# with a dup
if with_nulls:
def f(a):
return np.insert(a, 1000, a[0])
labels = list(map(f, labels))
index =
|
MultiIndex(levels=levels, labels=labels)
|
pandas.MultiIndex
|
#!/usr/bin/env python
from __future__ import (absolute_import, division, print_function)
# , unicode_literals)
from future.utils import iteritems
from collections import OrderedDict
from datetime import datetime
import logging
import numpy as np
import pandas as pd
import re
import socket
import sys
import time
from Bandit.pr_util import print_warning, print_error
try:
# Python 2.x
from StringIO import StringIO
except ImportError:
# Python 3.x
from io import StringIO
try:
# Try importing assuming Python 3.x first
# from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError, URLError
except ImportError:
# Otherwise fallback to Python 2.x
# from urlparse import urlparse
# from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError, URLError
# URLs can be generated/tested at: http://waterservices.usgs.gov/rest/Site-Test-Tool.html
BASE_NWIS_URL = 'http://waterservices.usgs.gov/nwis'
RETRIES = 3
nwis_logger = logging.getLogger('bandit.NWIS')
class NWIS(object):
"""Class for accessing and manipulating streamflow information from the
National Water Information System (NWIS; https://waterdata.usgs.gov/) provided by the
United States Geological Survey (USGS; https://www.usgs.gov/).
"""
# As written this class provides functions for downloading daily streamgage observations
# Additional functionality (e.g. monthly, annual, other statistics) may be added at a future time.
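# Minimal usage sketch (illustrative only; the gage ID and dates below are hypothetical, not from the source):
# nwis = NWIS(gage_ids=['01646500'], st_date=datetime(2010, 1, 1), en_date=datetime(2010, 12, 31))
# nwis.get_daily_streamgage_observations()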
def __init__(self, gage_ids=None, st_date=None, en_date=None, verbose=False):
"""Create the NWIS object.
:param list[str] gage_ids: list of streamgages to retrieve
:param st_date: start date for retrieving streamgage observations
:type st_date: None or datetime
:param en_date: end date for retrieving streamgage observations
:type en_date: None or datetime
:param bool verbose: output additional debugging information
"""
self.logger = logging.getLogger('bandit.NWIS')
self.logger.info('NWIS instance')
self.__stdate = st_date
self.__endate = en_date
self.__gageids = gage_ids
self.__outdata = None
self.__date_range = None
self.__final_outorder = None
self.__verbose = verbose
# Regex's for stripping unneeded clutter from the rdb file
self.__t1 = re.compile('^#.*$\n?', re.MULTILINE) # remove comment lines
self.__t2 = re.compile('^5s.*$\n?', re.MULTILINE) # remove field length lines
@property
def start_date(self):
"""Get the start date.
:returns: start date
:rtype: None or datetime
"""
return self.__stdate
@start_date.setter
def start_date(self, st_date):
"""Set the start date.
:param st_date: start date (either a datetime object or a string of the form YYYY-MM-DD)
:type st_date: datetime or str
"""
# Set the starting date for retrieval
# As written this will clear any streamgage observations that have been downloaded.
if isinstance(st_date, datetime):
self.__stdate = st_date
else:
try:
# Assume a string of form 'YYYY-MM-DD' was provided
self.__stdate = datetime(*[int(xx) for xx in re.split('-| |:', st_date)])
except ValueError as dt_err:
# Wrong form for date was provided
print_error('Date must be either a datetime or of form "YYYY-MM-DD"')
print(dt_err)
self.__outdata = None
@property
def end_date(self):
"""Get the end date.
:returns: end date
:rtype: None or datetime
"""
return self.__endate
@end_date.setter
def end_date(self, en_date):
"""Set the end date.
:param en_date: end date (either a datetime object or a string of the form YYYY-MM-DD)
:type en_date: datetime or str
"""
if isinstance(en_date, datetime):
self.__endate = en_date
else:
try:
# Assume a string of form 'YYYY-MM-DD' was provided
self.__endate = datetime(*[int(xx) for xx in re.split('-| |:', en_date)])
except ValueError as dt_err:
# Wrong form for date was provided
print_error('Date must be either a datetime or of form "YYYY-MM-DD"')
print(dt_err)
self.__outdata = None
@property
def gage_ids(self):
"""Get list of streamgage IDs for retrieval.
:returns: list of streamgage IDs
:rtype: list[str]
"""
return self.__gageids
@gage_ids.setter
def gage_ids(self, gage_ids):
"""Set the streamgage ID(s) to retrieve from NWIS.
:param gage_ids: streamgage ID(s)
:type gage_ids: list or tuple or str
"""
# Set the gage ids for retrieval; this will clear any downloaded observations
if isinstance(gage_ids, (list, tuple)):
self.__gageids = gage_ids
else:
# Assuming a single value, so convert to a list
self.__gageids = [gage_ids]
self.__outdata = None
def check_for_flag(self, pat, data, col_id):
"""Check for a given pattern in supplied data.
Checks for a given pattern in the data and logs a warning if it occurs.
:param str pat: pattern to find
:param data: data for pattern matching
:param str col_id: column to search in data
"""
# Check for the pattern in the data; log a warning if found and strip the pattern from the values
pat_count = data[col_id].str.contains(pat).sum()
if pat_count > 0:
pat_first_date = data[data[col_id].str.contains(pat)].index[0].strftime('%Y-%m-%d')
self.logger.warning('{} has {} records marked {}. '
'First occurrence at {}. Suffix removed from values'.format(col_id, pat_count, pat,
pat_first_date))
data[col_id].replace(pat, '', regex=True, inplace=True)
def initialize_dataframe(self):
"""Clears downloaded data and initializes the output dataframe.
"""
if not self.__endate:
self.__endate = datetime.today()
if not self.__stdate:
self.__stdate = datetime(1890, 1, 1)
# Create an initial dataframe that contains all dates in the date range.
# Any streamgage missing date(s) will have a NaN value for each missing date.
# Otherwise it is possible to have dates missing in the output.
self.__date_range = pd.date_range(start=self.__stdate, end=self.__endate, freq='D')
self.__outdata = pd.DataFrame(index=self.__date_range)
self.__final_outorder = ['year', 'month', 'day', 'hour', 'minute', 'second']
def get_daily_streamgage_observations(self):
"""Retrieves daily observations.
If gage_ids is set then retrieve observations for those streamgages; otherwise,
return a single dummy dataset. If st_date and en_date are set then observations
are restricted to the given date range.
"""
if not self.__outdata:
self.initialize_dataframe()
# Set timeout in seconds - if not set defaults to infinite time for response
timeout = 30
socket.setdefaulttimeout(timeout)
url_pieces = OrderedDict()
url_pieces['?format'] = 'rdb'
url_pieces['sites'] = ''
url_pieces['startDT'] = self.__stdate.strftime('%Y-%m-%d')
url_pieces['endDT'] = self.__endate.strftime('%Y-%m-%d')
url_pieces['statCd'] = '00003' # Mean values
url_pieces['siteStatus'] = 'all'
url_pieces['parameterCd'] = '00060' # Discharge
url_pieces['siteType'] = 'ST'
# url_pieces['access'] = '3' # Allows download of observations for restricted sites/parameters
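# For reference, once the per-gage 'sites' value is filled in below, the assembled request takes the form
# (gage ID and dates here are illustrative placeholders):
# http://waterservices.usgs.gov/nwis/dv/?format=rdb&sites=01646500&startDT=2010-01-01&endDT=2010-12-31&statCd=00003&siteStatus=all&parameterCd=00060&siteType=ST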
if not self.__gageids:
# If no streamgages are provided then create a single dummy column filled with noData
self.logger.warning('No streamgages provided - dummy entry created.')
df = pd.DataFrame(index=self.__date_range, columns=['00000000'])
df.index.name = 'date'
self.__outdata = pd.merge(self.__outdata, df, how='left', left_index=True, right_index=True)
self.__final_outorder.append('00000000')
# Iterate over new_poi_gage_id and retrieve daily streamflow data from NWIS
for gidx, gg in enumerate(self.__gageids):
if self.__verbose:
sys.stdout.write('\r ')
sys.stdout.write('\rStreamgage: {} ({}/{}) '.format(gg, gidx + 1, len(self.__gageids)))
sys.stdout.flush()
url_pieces['sites'] = gg
url_final = '&'.join(['{}={}'.format(kk, vv) for kk, vv in iteritems(url_pieces)])
# Read site data from NWIS
attempts = 0
while attempts < RETRIES:
try:
response = urlopen('{}/dv/{}'.format(BASE_NWIS_URL, url_final))
try:
# Python 2.7.x
encoding = response.info().getparam('charset')
except AttributeError:
# Python 3.x
encoding = response.info().get_param('charset', failobj='utf8')
streamgage_obs_page = response.read().decode(encoding)
# with urlopen('{}/dv/{}'.format(BASE_NWIS_URL, url_final)) as response:
# encoding = response.info().get_param('charset', 'utf8')
# streamgage_obs_page = response.read().decode(encoding)
# streamgage_obs_page = urlopen('{}/dv/{}'.format(BASE_NWIS_URL, url_final))
break
except (HTTPError, URLError) as err:
attempts += 1
self.logger.warning('HTTPError: {}, Try {} of {}'.format(err, attempts, RETRIES))
# print('HTTPError: {}, Try {} of {}'.format(err, attempts, RETRIES))
except (ConnectionResetError) as err:
attempts += 1
self.logger.warning('ConnectionResetError: {}, Try {} of {}'.format(err, attempts, RETRIES))
time.sleep(10)
if streamgage_obs_page.splitlines()[0] == '# No sites found matching all criteria':
# No observations are available for the streamgage
# Create a dummy dataset to output
self.logger.warning('{} has no data for {} to {}'.format(gg,
self.__stdate.strftime('%Y-%m-%d'),
self.__endate.strftime('%Y-%m-%d')))
df = pd.DataFrame(index=self.__date_range, columns=[gg])
df.index.name = 'date'
else:
streamgage_observations = streamgage_obs_page
# streamgage_observations = streamgage_obs_page.read()
# Strip the comment lines and field length lines from the result using regex
streamgage_observations = self.__t1.sub('', streamgage_observations, count=0)
streamgage_observations = self.__t2.sub('', streamgage_observations, count=0)
# Have to enforce site_no as string/text
col_names = ['site_no']
col_types = [np.str_]
cols = dict(zip(col_names, col_types))
# Read the rdb file into a dataframe
# TODO: Handle empty datasets from NWIS by creating dummy data and providing a warning
df = pd.read_csv(StringIO(streamgage_observations), sep='\t', dtype=cols,
parse_dates={'date': ['datetime']}, index_col='date')
# Conveniently the columns we want to drop contain '_cd' in their names
drop_cols = [col for col in df.columns if '_cd' in col]
df.drop(drop_cols, axis=1, inplace=True)
# There should now only be date, site_no, and a Q column named *_00060_00003
# We will rename the *_00060_00003 to mean_val
rename_col = [col for col in df.columns if '_00060_00003' in col]
if len(rename_col) > 1:
self.logger.warning('{} had more than one Q-col returned; empty dataset used.'.format(gg))
df = pd.DataFrame(index=self.__date_range, columns=[gg])
df.index.name = 'date'
# self.logger.warning('{} had more than one Q-col returned; using {}'.format(gg, rename_col[0]))
#
# # Keep the first TS column and drop the others
# while len(rename_col) > 1:
# curr_col = rename_col.pop()
# df.drop([curr_col], axis=1, inplace=True)
else:
df.rename(columns={rename_col[0]: gg}, inplace=True)
try:
# If no flags are present the column should already be float
pd.to_numeric(df[gg], errors='raise')
except ValueError:
self.logger.warning('{} had one or more flagged values; flagged values converted to NaN.'.format(gg))
df[gg] = pd.to_numeric(df[gg], errors='coerce')
# Check for discontinued gage records
# if df[gg].dtype == np.object_:
# # If the datatype of the streamgage values is np.object_ that
# # means some string is appended to one or more of the values.
#
# # Common bad data: set(['Eqp', 'Ice', 'Ssn', 'Rat', 'Bkw', '***', 'Dis'])
#
# # Check for discontinued flagged records
# self.check_for_flag('_?Dis', df, gg)
#
# # Check for ice-flagged records
# self.check_for_flag('_?Ice', df, gg)
#
# # Check for eqp-flagged records (Equipment malfunction)
# self.check_for_flag('_?Eqp', df, gg)
#
# # Check for _Ssn (parameter monitored seasonally)
# self.check_for_flag('_?Ssn', df, gg)
#
# # Check for _Rat (rating being developed)
# self.check_for_flag('_?Rat', df, gg)
#
# # Check for _Bkw (Value is affected by backwater at the measurement site)
# self.check_for_flag('_?Bkw', df, gg)
#
# # Check for 1 or more asterisks
# self.check_for_flag('_?\*+', df, gg)
# Resample to daily to fill in the missing days with NaN
# df = df.resample('D').mean()
self.__outdata =
|
pd.merge(self.__outdata, df, how='left', left_index=True, right_index=True)
|
pandas.merge
|
import unittest
import copy
import numpy as np
import numpy.testing as np_test
import pandas as pd
import pandas.testing as pd_test
import warnings
from pyblackscholesanalytics.market.market import MarketEnvironment
from pyblackscholesanalytics.options.options import PlainVanillaOption, DigitalOption
from pyblackscholesanalytics.utils.utils import scalarize
class TestPlainVanillaOption(unittest.TestCase):
"""Class to test public methods of PlainVanillaOption class"""
def setUp(self) -> None:
warnings.filterwarnings("ignore")
# common market environment
mkt_env = MarketEnvironment()
# option objects
self.call_opt = PlainVanillaOption(mkt_env)
self.put_opt = PlainVanillaOption(mkt_env, option_type="put")
# pricing parameters
S_scalar = 100
S_vector = [90, 100, 110]
t_scalar_string = "01-06-2020"
t_date_range = pd.date_range(start="2020-04-19", end="2020-12-21", periods=5)
# common pricing parameter setup
common_params = {"np_output": True, "minimization_method": "Least-Squares"}
# scalar parameters setup
self.scalar_params = copy.deepcopy(common_params)
self.scalar_params["S"] = S_scalar
self.scalar_params["t"] = t_scalar_string
# vector parameters setup
self.vector_params = copy.deepcopy(common_params)
self.vector_params["S"] = S_vector
self.vector_params["t"] = t_date_range
# complex pricing parameter setup
# (S scalar, K and t vector, sigma distributed as Kxt grid, r distributed as Kxt grid)
K_vector = [75, 85, 90, 95]
mK = len(K_vector)
n = 3
sigma_grid_K = np.array([0.1 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
r_grid_K = np.array([0.01 * (1 + i) for i in range(mK * n)]).reshape(n, mK)
self.complex_params = {"S": S_vector[0],
"K": K_vector,
"t": pd.date_range(start="2020-04-19", end="2020-12-21", periods=n),
"sigma": sigma_grid_K,
"r": r_grid_K,
"np_output": False,
"minimization_method": "Least-Squares"}
def test_price_scalar(self):
"""Test price - scalar case"""
# call
test_call = scalarize(self.call_opt.price(**self.scalar_params))
expected_call = 7.548381716811839
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.price(**self.scalar_params))
expected_put = 4.672730506407959
self.assertEqual(test_put, expected_put)
def test_price_vector_np(self):
"""Test price - np.ndarray output case"""
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = np.array([[3.48740247e+00, 8.42523213e+00, 1.55968082e+01],
[2.53045128e+00, 7.14167587e+00, 1.43217796e+01],
[1.56095778e+00, 5.72684668e+00, 1.29736886e+01],
[5.89165298e-01, 4.00605304e+00, 1.14939139e+01],
[7.21585753e-04, 1.38927959e+00, 1.01386434e+01]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = np.array([[1.00413306e+01, 4.97916024e+00, 2.15073633e+00],
[9.90791873e+00, 4.51914332e+00, 1.69924708e+00],
[9.75553655e+00, 3.92142545e+00, 1.16826738e+00],
[9.62127704e+00, 3.03816479e+00, 5.26025639e-01],
[9.86382907e+00, 1.25238707e+00, 1.75090342e-03]])
np_test.assert_allclose(test_put, expected_put)
def test_price_vector_df(self):
"""Test price - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.price(**self.vector_params)
expected_call = pd.DataFrame(data=[[3.48740247e+00, 8.42523213e+00, 1.55968082e+01],
[2.53045128e+00, 7.14167587e+00, 1.43217796e+01],
[1.56095778e+00, 5.72684668e+00, 1.29736886e+01],
[5.89165298e-01, 4.00605304e+00, 1.14939139e+01],
[7.21585753e-04, 1.38927959e+00, 1.01386434e+01]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.price(**self.vector_params)
expected_put = pd.DataFrame(data=[[1.00413306e+01, 4.97916024e+00, 2.15073633e+00],
[9.90791873e+00, 4.51914332e+00, 1.69924708e+00],
[9.75553655e+00, 3.92142545e+00, 1.16826738e+00],
[9.62127704e+00, 3.03816479e+00, 5.26025639e-01],
[9.86382907e+00, 1.25238707e+00, 1.75090342e-03]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_PnL_scalar(self):
"""Test P&L - scalar case"""
# call
test_call = scalarize(self.call_opt.PnL(**self.scalar_params))
expected_call = 4.060979245868182
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.PnL(**self.scalar_params))
expected_put = -5.368600081057167
self.assertEqual(test_put, expected_put)
def test_PnL_vector_np(self):
"""Test P&L - np.ndarray output case"""
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = np.array([[0., 4.93782966, 12.10940574],
[-0.95695119, 3.6542734, 10.83437716],
[-1.92644469, 2.2394442, 9.48628613],
[-2.89823717, 0.51865057, 8.00651142],
[-3.48668089, -2.09812288, 6.65124095]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = np.array([[0., -5.06217034, -7.89059426],
[-0.13341186, -5.52218727, -8.3420835],
[-0.28579403, -6.11990513, -8.87306321],
[-0.42005355, -7.0031658, -9.51530495],
[-0.17750152, -8.78894351, -10.03957968]])
np_test.assert_allclose(test_put, expected_put)
def test_PnL_vector_df(self):
"""Test P&L - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.PnL(**self.vector_params)
expected_call = pd.DataFrame(data=[[0., 4.93782966, 12.10940574],
[-0.95695119, 3.6542734, 10.83437716],
[-1.92644469, 2.2394442, 9.48628613],
[-2.89823717, 0.51865057, 8.00651142],
[-3.48668089, -2.09812288, 6.65124095]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.PnL(**self.vector_params)
expected_put = pd.DataFrame(data=[[0., -5.06217034, -7.89059426],
[-0.13341186, -5.52218727, -8.3420835],
[-0.28579403, -6.11990513, -8.87306321],
[-0.42005355, -7.0031658, -9.51530495],
[-0.17750152, -8.78894351, -10.03957968]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_delta_scalar(self):
"""Test Delta - scalar case"""
# call
test_call = scalarize(self.call_opt.delta(**self.scalar_params))
expected_call = 0.6054075531684143
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.delta(**self.scalar_params))
expected_put = -0.3945924468315857
self.assertEqual(test_put, expected_put)
def test_delta_vector_np(self):
"""Test Delta - np.ndarray output case"""
# call
test_call = self.call_opt.delta(**self.vector_params)
expected_call = np.array([[3.68466757e-01, 6.15283790e-01, 8.05697003e-01],
[3.20097309e-01, 6.00702480e-01, 8.18280131e-01],
[2.54167521e-01, 5.83663527e-01, 8.41522350e-01],
[1.49152172e-01, 5.61339299e-01, 8.91560577e-01],
[8.89758553e-04, 5.23098767e-01, 9.98343116e-01]])
np_test.assert_allclose(test_call, expected_call)
# put
test_put = self.put_opt.delta(**self.vector_params)
expected_put = np.array([[-0.63153324, -0.38471621, -0.194303],
[-0.67990269, -0.39929752, -0.18171987],
[-0.74583248, -0.41633647, -0.15847765],
[-0.85084783, -0.4386607, -0.10843942],
[-0.99911024, -0.47690123, -0.00165688]])
np_test.assert_allclose(test_put, expected_put, rtol=5e-6)
def test_delta_vector_df(self):
"""Test Delta - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.delta(**self.vector_params)
expected_call = pd.DataFrame(data=[[3.68466757e-01, 6.15283790e-01, 8.05697003e-01],
[3.20097309e-01, 6.00702480e-01, 8.18280131e-01],
[2.54167521e-01, 5.83663527e-01, 8.41522350e-01],
[1.49152172e-01, 5.61339299e-01, 8.91560577e-01],
[8.89758553e-04, 5.23098767e-01, 9.98343116e-01]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.delta(**self.vector_params)
expected_put = pd.DataFrame(data=[[-0.63153324, -0.38471621, -0.194303],
[-0.67990269, -0.39929752, -0.18171987],
[-0.74583248, -0.41633647, -0.15847765],
[-0.85084783, -0.4386607, -0.10843942],
[-0.99911024, -0.47690123, -0.00165688]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_put.rename_axis("S", axis='columns', inplace=True)
expected_put.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_put, expected_put)
def test_gamma_scalar(self):
"""Test Gamma - scalar case"""
# call
test_call = scalarize(self.call_opt.gamma(**self.scalar_params))
expected_call = 0.025194958512498786
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.gamma(**self.scalar_params))
expected_put = copy.deepcopy(expected_call)
self.assertEqual(test_put, expected_put)
# assert call and put gamma coincide
self.assertEqual(test_call, test_put)
def test_gamma_vector_np(self):
"""Test Gamma - np.ndarray output case"""
# call
test_call = self.call_opt.gamma(**self.vector_params)
expected_call = np.array([[0.02501273, 0.02281654, 0.01493167],
[0.02725456, 0.02648423, 0.01645793],
[0.02950243, 0.03231528, 0.01820714],
[0.02925862, 0.0446913, 0.01918121],
[0.00101516, 0.12030889, 0.00146722]])
np_test.assert_allclose(test_call, expected_call, rtol=5e-6)
# put
test_put = self.put_opt.gamma(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
np_test.assert_allclose(test_put, expected_put, rtol=5e-6)
# assert call and put gamma coincide
np_test.assert_allclose(test_call, test_put)
def test_gamma_vector_df(self):
"""Test Gamma - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.gamma(**self.vector_params)
expected_call = pd.DataFrame(data=[[0.02501273, 0.02281654, 0.01493167],
[0.02725456, 0.02648423, 0.01645793],
[0.02950243, 0.03231528, 0.01820714],
[0.02925862, 0.0446913, 0.01918121],
[0.00101516, 0.12030889, 0.00146722]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call)
# put
test_put = self.put_opt.gamma(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
pd_test.assert_frame_equal(test_put, expected_put)
# assert call and put gamma coincide
pd_test.assert_frame_equal(test_call, test_put)
def test_vega_scalar(self):
"""Test Vega - scalar case"""
# call
test_call = scalarize(self.call_opt.vega(**self.scalar_params))
expected_call = 0.29405622811847903
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.vega(**self.scalar_params))
expected_put = copy.deepcopy(expected_call)
self.assertEqual(test_put, expected_put)
# assert call and put vega coincide
self.assertEqual(test_call, test_put)
def test_vega_vector_np(self):
"""Test Vega - np.ndarray output case"""
# call
test_call = self.call_opt.vega(**self.vector_params)
expected_call = np.array([[0.28419942, 0.32005661, 0.2534375],
[0.23467293, 0.28153094, 0.21168961],
[0.17415326, 0.23550311, 0.16055207],
[0.09220072, 0.17386752, 0.09029355],
[0.00045056, 0.06592268, 0.00097279]])
np_test.assert_allclose(test_call, expected_call, rtol=1e-5)
# put
test_put = self.put_opt.vega(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
# assert call and put vega coincide
np_test.assert_allclose(test_call, test_put)
def test_vega_vector_df(self):
"""Test Vega - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.vega(**self.vector_params)
expected_call = pd.DataFrame(data=[[0.28419942, 0.32005661, 0.2534375],
[0.23467293, 0.28153094, 0.21168961],
[0.17415326, 0.23550311, 0.16055207],
[0.09220072, 0.17386752, 0.09029355],
[0.00045056, 0.06592268, 0.00097279]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
# put
test_put = self.put_opt.vega(**self.vector_params)
expected_put = copy.deepcopy(expected_call)
pd_test.assert_frame_equal(test_put, expected_put, check_less_precise=True)
# assert call and put vega coincide
pd_test.assert_frame_equal(test_call, test_put)
def test_theta_scalar(self):
"""Test Theta - scalar case"""
# call
test_call = scalarize(self.call_opt.theta(**self.scalar_params))
expected_call = -0.021064685979455443
self.assertEqual(test_call, expected_call)
# put
test_put = scalarize(self.put_opt.theta(**self.scalar_params))
expected_put = -0.007759980665812141
self.assertEqual(test_put, expected_put)
def test_theta_vector_np(self):
"""Test Theta - np.ndarray output case"""
# call
test_call = self.call_opt.theta(**self.vector_params)
expected_call = np.array([[-0.01516655, -0.01977662, -0.01990399],
[-0.01569631, -0.02176239, -0.0212802],
[-0.01601397, -0.02491789, -0.02297484],
[-0.01474417, -0.03162919, -0.02457737],
[-0.00046144, -0.0728981, -0.01462746]])
np_test.assert_allclose(test_call, expected_call, rtol=5e-4)
# put
test_put = self.put_opt.theta(**self.vector_params)
expected_put = np.array([[-0.00193999, -0.00655005, -0.00667743],
[-0.00235693, -0.00842301, -0.00794082],
[-0.00256266, -0.01146658, -0.00952353],
[-0.00117813, -0.01806315, -0.01101133],
[0.01321844, -0.05921823, -0.00094758]])
np_test.assert_allclose(test_put, expected_put, rtol=1e-5)
def test_theta_vector_df(self):
"""Test Theta - pd.DataFrame output case"""
# request Pandas DataFrame as output format
self.vector_params["np_output"] = False
# call
test_call = self.call_opt.theta(**self.vector_params)
expected_call = pd.DataFrame(data=[[-0.01516655, -0.01977662, -0.01990399],
[-0.01569631, -0.02176239, -0.0212802],
[-0.01601397, -0.02491789, -0.02297484],
[-0.01474417, -0.03162919, -0.02457737],
[-0.00046144, -0.0728981, -0.01462746]],
index=self.vector_params["t"],
columns=self.vector_params["S"])
expected_call.rename_axis("S", axis='columns', inplace=True)
expected_call.rename_axis("t", axis='rows', inplace=True)
|
pd_test.assert_frame_equal(test_call, expected_call, check_less_precise=True)
|
pandas.testing.assert_frame_equal
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import numpy as np
import pytest
import pyarrow as pa
from pyarrow.fs import LocalFileSystem, SubTreeFileSystem
from pyarrow.tests.parquet.common import (
parametrize_legacy_dataset, parametrize_legacy_dataset_not_supported)
from pyarrow.util import guid
from pyarrow.vendored.version import Version
try:
import pyarrow.parquet as pq
from pyarrow.tests.parquet.common import (_read_table, _test_dataframe,
_write_table)
except ImportError:
pq = None
try:
import pandas as pd
import pandas.testing as tm
from pyarrow.tests.parquet.common import (_roundtrip_pandas_dataframe,
alltypes_sample)
except ImportError:
pd = tm = None
@pytest.mark.pandas
def test_pandas_parquet_custom_metadata(tempdir):
df = alltypes_sample(size=10000)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
assert b'pandas' in arrow_table.schema.metadata
_write_table(arrow_table, filename, version='2.6', coerce_timestamps='ms')
metadata = pq.read_metadata(filename).metadata
assert b'pandas' in metadata
js = json.loads(metadata[b'pandas'].decode('utf8'))
assert js['index_columns'] == [{'kind': 'range',
'name': None,
'start': 0, 'stop': 10000,
'step': 1}]
@pytest.mark.pandas
def test_merging_parquet_tables_with_different_pandas_metadata(tempdir):
# ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch
schema = pa.schema([
pa.field('int', pa.int16()),
pa.field('float', pa.float32()),
pa.field('string', pa.string())
])
df1 = pd.DataFrame({
'int': np.arange(3, dtype=np.uint8),
'float': np.arange(3, dtype=np.float32),
'string': ['ABBA', 'EDDA', 'ACDC']
})
df2 = pd.DataFrame({
'int': [4, 5],
'float': [1.1, None],
'string': [None, None]
})
table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)
table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)
assert not table1.schema.equals(table2.schema, check_metadata=True)
assert table1.schema.equals(table2.schema)
writer = pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema)
writer.write_table(table1)
writer.write_table(table2)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_column_multiindex(tempdir, use_legacy_dataset):
df = alltypes_sample(size=10)
df.columns = pd.MultiIndex.from_tuples(
list(zip(df.columns, df.columns[::-1])),
names=['level_1', 'level_2']
)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
assert arrow_table.schema.pandas_metadata is not None
_write_table(arrow_table, filename, version='2.6', coerce_timestamps='ms')
table_read = pq.read_pandas(
filename, use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(
tempdir, use_legacy_dataset
):
df = alltypes_sample(size=10000)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
js = arrow_table.schema.pandas_metadata
assert not js['index_columns']
# ARROW-2170
# While index_columns should be empty, columns needs to be filled still.
assert js['columns']
_write_table(arrow_table, filename, version='2.6', coerce_timestamps='ms')
table_read = pq.read_pandas(
filename, use_legacy_dataset=use_legacy_dataset)
js = table_read.schema.pandas_metadata
assert not js['index_columns']
read_metadata = table_read.schema.metadata
assert arrow_table.schema.metadata == read_metadata
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
# TODO(dataset) duplicate column selection actually gives duplicate columns now
@pytest.mark.pandas
@parametrize_legacy_dataset_not_supported
def test_pandas_column_selection(tempdir, use_legacy_dataset):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16)
})
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, filename)
table_read = _read_table(
filename, columns=['uint8'], use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df[['uint8']], df_read)
# ARROW-4267: Selection of duplicate columns still leads to these columns
# being read uniquely.
table_read = _read_table(
filename, columns=['uint8', 'uint8'],
use_legacy_dataset=use_legacy_dataset)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df[['uint8']], df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_native_file_roundtrip(tempdir, use_legacy_dataset):
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version='2.6')
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = _read_table(
reader, use_legacy_dataset=use_legacy_dataset).to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_read_pandas_column_subset(tempdir, use_legacy_dataset):
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version='2.6')
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = pq.read_pandas(
reader, columns=['strings', 'uint8'],
use_legacy_dataset=use_legacy_dataset
).to_pandas()
tm.assert_frame_equal(df[['strings', 'uint8']], df_read)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_pandas_parquet_empty_roundtrip(tempdir, use_legacy_dataset):
df = _test_dataframe(0)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version='2.6')
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = _read_table(
reader, use_legacy_dataset=use_legacy_dataset).to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_can_write_nested_data(tempdir):
data = {
"agg_col": [
{"page_type": 1},
{"record_type": 1},
{"non_consecutive_home": 0},
],
"uid_first": "1001"
}
df =
|
pd.DataFrame(data=data)
|
pandas.DataFrame
|
"""Standardize simulated sequence dates to human readable format (YYYY-MM-DD).
"""
import argparse
import datetime
import pandas as pd
def float_to_datestring(time):
"""Convert a floating point date from TreeTime `numeric_date` to a date string
"""
# Extract the year and remainder from the floating point date.
year = int(time)
remainder = time - year
# Calculate the day of the year (out of 365 + 0.25 for leap years).
tm_yday = int(remainder * 365.25)
if tm_yday == 0:
tm_yday = 1
# Construct a date object from the year and day of the year.
date = datetime.datetime.strptime("%s-%s" % (year, tm_yday), "%Y-%j")
# Build the date string with zero-padded months and days.
date_string = "%s-%.2i-%.2i" % (date.year, date.month, date.day)
return date_string
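# Worked example (values chosen purely for illustration): for time = 2000.5,
# year = 2000, remainder = 0.5, tm_yday = int(0.5 * 365.25) = 182, and since 2000 is a
# leap year day 182 is June 30, so float_to_datestring(2000.5) returns "2000-06-30".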
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--metadata", help="metadata for simulated sequences")
parser.add_argument("--start-year", default=2000.0, type=float, help="year to start simulated dates from")
parser.add_argument("--generations-per-year", default=200.0, type=float, help="number of generations to map to a single year")
parser.add_argument("--output", help="metadata with standardized dates and nonzero fitness records")
args = parser.parse_args()
df = pd.read_csv(args.metadata, sep="\t")
df["num_date"] = args.start_year + (df["generation"] / args.generations_per_year)
df["date"] = df["num_date"].apply(float_to_datestring)
df["year"] =
|
pd.to_datetime(df["date"])
|
pandas.to_datetime
|
import os
import yaml
import pandas as pd
import argparse
import datetime
from azureml.core import Run
from src.data.preprocess import preprocess
parser = argparse.ArgumentParser()
parser.add_argument('--rawdatadir', type=str, help="Raw HIFIS client data directory")
parser.add_argument('--inferencedir', type=str, help="directory containing all files necessary for inference")
parser.add_argument('--preprocessedoutputdir', type=str, help="intermediate serialized pipeline data")
args = parser.parse_args()
run = Run.get_context()
cur_date = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
# Modify paths in config file based on the Azure datastore paths passed as arguments.
cfg = yaml.full_load(open("./config.yml", 'r')) # Load config data
cfg['PATHS']['RAW_DATA'] = args.rawdatadir + '/' + cfg['PATHS']['RAW_DATA'].split('/')[-1]
cfg['PATHS']['RAW_SPDAT_DATA'] = args.rawdatadir + '/' + cfg['PATHS']['RAW_SPDAT_DATA'].split('/')[-1]
cfg['PATHS']['PROCESSED_DATA'] = args.preprocessedoutputdir + '/' + cfg['PATHS']['PROCESSED_DATA'].split('/')[-1]
cfg['PATHS']['PROCESSED_OHE_DATA'] = args.preprocessedoutputdir + '/' + cfg['PATHS']['PROCESSED_OHE_DATA'].split('/')[-1]
cfg['PATHS']['TRAIN_SET'] = args.preprocessedoutputdir + '/' + cfg['PATHS']['TRAIN_SET'].split('/')[-1]
cfg['PATHS']['TEST_SET'] = args.preprocessedoutputdir + '/' + cfg['PATHS']['TEST_SET'].split('/')[-1]
cfg['PATHS']['GROUND_TRUTH'] = args.preprocessedoutputdir + '/' + cfg['PATHS']['GROUND_TRUTH'].split('/')[-1]
# A few checks to screen for problems with the SQL query that retrieves HIFIS data. If problems are found, send an alert email.
raw_df = pd.read_csv(cfg['PATHS']['RAW_DATA'], encoding="ISO-8859-1", low_memory=False)
# Load meta-info from the last retrieved snapshot of raw HIFIS data
raw_data_info_path = args.inferencedir + '/raw_data_info.yml'
if os.path.exists(raw_data_info_path):
raw_data_info = yaml.full_load(open(raw_data_info_path, 'r')) # Load config data
else:
raw_data_info = {'N_ROWS': raw_df.shape[0], 'N_COLS': raw_df.shape[1]}
check_date = datetime.datetime.today() - datetime.timedelta(days=7) # 1 week ago from today
raw_df['DateStart'] =
|
pd.to_datetime(raw_df['DateStart'], errors='coerce')
|
pandas.to_datetime
|
"""Web interface"""
import re
import base64
import numpy as np
import os
import pandas as pd
from sklearn.manifold import TSNE
import spacy
import streamlit as st
from textblob import TextBlob
import src.analyzer as az
import src.constants as cts
import src.doc_similarity as ds
import src.get_handler as gh
import src.json_util as ju
import src.markdown as md
import src.summarizer as sz
import src.topic_modeling as tm
import src.visualization as vis
# resources/sample_reflections/lab1, resources/sample_reflections/lab2
# initialize main_df and preprocessed_Df
SPACY_MODEL_NAMES = ["en_core_web_sm", "en_core_web_md"]
preprocessed_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""-------------------- Configure the environment --------------------"""
# Import the required packages
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import defaultdict
from surprise import SVD
from surprise import Dataset
from surprise import Reader
import pandas as pd
import os
from scipy.stats import spearmanr
import numpy as np
from pyspark.sql import SparkSession
from pyspark.sql import types
from pyspark import SQLContext,HiveContext
from pyspark.ml.feature import VectorAssembler, StringIndexer, OneHotEncoder, Bucketizer
from pyspark.ml import Pipeline, Transformer, Model
from pyspark.ml.pipeline import PipelineModel
from pyhive import hive
import pandas as pd
# Connect to the Yunchao database; port numbers: hive 10001, impala 21051
def get_data_from_hive(query):
conn=hive.connect(host='10.1.53.19',port=21051,username='songyuanchen',password='<PASSWORD>',auth='LDAP')
cur=conn.cursor()
cur.execute(query)
data=cur.fetchall()
columnDes=cur.description # get the cursor's column description metadata
columnNames=[columnDes[i][0] for i in range(len(columnDes))]
df=pd.DataFrame([list(i) for i in data],columns=columnNames)
cur.close()
return df
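# Illustrative call (the query string below is a placeholder, not a real table from the source):
# sample_df = get_data_from_hive("SELECT shop_id, goods_id FROM some_table LIMIT 10")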
os.system("source setpython_spark spark2 python3.5")
os.environ["PYSPARK_PYTHON"]='/usr/bin/python3.5'
os.environ["PYSPARK_DRIVER_PYTHON"]='/usr/bin/python3.5'
spark=SparkSession.builder.appName("new_peizhi") \
.master("yarn") \
.config('spark.executor.instances',5) \
.config('spark.executor.cores', 20) \
.config("spark.executor.memory", '5G') \
.config("spark.port.maxRetries", 100) \
.config("spark.driver.maxResultSize", '4G') \
.config("spark.serializer", 'org.apache.spark.serializer.KryoSerializer') \
.config('spark.driver.memory','4G') \
.config('spark.default.parallelism',60) \
.config("spark.shuffle.file.buffer", '128k') \
.config("spark.reducer.maxSizeInFlight", '96m') \
.config("spark.dynamicAllocation.enabled", False)\
.enableHiveSupport() \
.getOrCreate()
"""-------------------- Get each store's rating for the goods it sells --------------------"""
# Get each store's rating for every product; the ratings have already been computed and stored in hive
# Ratings are computed by sorting each store's goods by sales volume in ascending order and taking the cumulative quantile * 5 as the score
query=r"SELECT shop_id,goods_id,sum(ratio) over(PARTITION BY shop_id ORDER BY row_num ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM shushi_tmp.wanglinhan_matrix_factorization_shop_goods_ratio_order_big"
shop_goods_ratio_cumsum=get_data_from_hive(query)
shop_goods_rating=shop_goods_ratio_cumsum
shop_goods_rating.columns = ['shop_id','goods_id','rating'] # rename the columns for later use
shop_goods_rating['rating']=shop_goods_rating['rating']*5 # multiply the whole column by 5 to map ratings onto the 0-5 range
"""-------------------- Generate recommendations for each store --------------------"""
def get_top_n(predictions, n=10):
"""Get the top-N product recommendations for each store
Args:
predictions(list of Prediction objects): results returned by the recommendation algorithm
n(int): number of products to recommend for each store, 10 by default
Returns:
A dict whose keys are shop_id and whose values are lists of (goods_id, estimated rating) tuples
"""
# First, map each prediction to its corresponding user (store)
top_n = defaultdict(list)
for uid, iid, true_r, est, _ in predictions:
top_n[uid].append((iid, est))
# Then sort the predictions and keep the n highest-rated items
for uid, user_ratings in top_n.items():
user_ratings.sort(key=lambda x: x[1], reverse=True)
top_n[uid] = user_ratings[:n]
return top_n
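# Illustrative result shape (shop and goods IDs are hypothetical): get_top_n(predictions, n=2)
# might return {'shop_001': [('goods_A', 4.8), ('goods_B', 4.5)], ...}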
# A Reader is needed to parse the data; the rating scale must be specified
reader = Reader(rating_scale=(0,5))
# Use the reader to load and process the ratings computed earlier
data = Dataset.load_from_df(shop_goods_rating[['shop_id','goods_id','rating']], reader)
# Build the training set and fit the SVD model
trainset = data.build_full_trainset()
algo = SVD()
algo.fit(trainset)
# Predict every store's rating for every product
testset = trainset.build_testset() # ratings for products that appear in the original dataset
testset1 = trainset.build_anti_testset() # products each store has never sold
testset.extend(testset1) # combining the two yields all store-product combinations
predictions = algo.test(testset) # use the factorized matrices to produce the predictions
# Get the top-2000 highest-rated recommended products and their scores for each store
top_n = get_top_n(predictions, n=2000)
"""-------------------- Validate the recommendation results by comparing two correlation measures --------------------"""
# Main loop computing the correlation validation metrics
query=r"SELECT DISTINCT(shop_id) from shushi_tmp.shop_goods_5_6_last_7_month_avg_amt_abe_effect_9M5R_to_9010_angelo_20210619"
shop_id_df=get_data_from_hive(query)
# Build a list of all shop IDs
shop_id_list=[]
for i in range(len(shop_id_df)):
shop_id_list.append(shop_id_df.iloc[i,0])
# corr_record records the two correlation values for each store
corr_record=pd.DataFrame(columns=('shop_id','corr1','corr2'))
count=0
for j in range(len(shop_id_list)):
# Get the name (shop_name) of the store currently being processed
shop_name_pre=shop_id_list[j]
shop_name='{!r}'.format(shop_name_pre)
recommendation_tuple_list=top_n[shop_name_pre]
# Fetch the goods currently on sale at this store
query_pre=r"select goods_id from shushi_tmp.shop_goods_5_6_last_7_month_avg_amt_abe_effect_9M5R_to_9010_angelo_20210619 where shop_id = "+ shop_name
query=query_pre
goods_one_shop=get_data_from_hive(query)
# Collect the goods as a list
goods_list=[]
for i in range(len(goods_one_shop)):
goods_list.append(goods_one_shop.iloc[i,0])
# Get the intersection of the recommendation list and the goods on sale
intersection_goods_list=[]
intersection_goods_score=[]
for i in recommendation_tuple_list:
if i[0] in goods_list:
intersection_goods_list.append(i[0])
intersection_goods_score.append(i[1])
df_dict={"goods_id":intersection_goods_list,
"score":intersection_goods_score}
goods_score_df=pd.DataFrame(df_dict)
# Fetch the sales data for the store currently being processed
query_pre=r'SELECT goods_id,sales_amt from shushi_tmp.shop_goods_5_6_last_7_month_avg_amt_abe_effect_9M5R_to_9010_angelo_20210619 WHERE shop_id = '+shop_name
query=query_pre
goods_sales_amt_one_shop=get_data_from_hive(query)
# Fetch the sales data for all stores
query=r"SELECT goods_id,sum(sales_amt) from shushi_tmp.shop_goods_5_6_last_7_month_avg_amt_abe_effect_9M5R_to_9010_angelo_20210619 GROUP BY goods_id"
goods_sales_amt_all=get_data_from_hive(query)
# The first correlation is the Spearman correlation between the recommendation scores and this store's sales volumes
corr1_df=pd.merge(goods_score_df,goods_sales_amt_one_shop,on='goods_id')
corr1_df['sales_amt']=pd.to_numeric(corr1_df['sales_amt'])
corr1_result=corr1_df[['score','sales_amt']].corr(method='spearman')
corr1=corr1_result.iloc[0,1]
# The second correlation is the Spearman correlation between this store's sales volumes and the total sales volumes across all stores
corr2_df_pre=pd.merge(goods_score_df,goods_sales_amt_one_shop,on='goods_id')
corr2_df=pd.merge(corr2_df_pre,goods_sales_amt_all,on='goods_id')
corr2_df['sales_amt']=pd.to_numeric(corr2_df['sales_amt'])
corr2_df['sum(sales_amt)']=
|
pd.to_numeric(corr2_df['sum(sales_amt)'])
|
pandas.to_numeric
|
"""
Silicone's custom statistical operations.
"""
import os
import numpy as np
import pandas as pd
import scipy.interpolate
def rolling_window_find_quantiles(
xs, ys, quantiles, nwindows=11, decay_length_factor=1
):
"""
Perform quantile analysis in the y-direction for x-weighted data.
Divides the x-axis into nwindows of equal length and weights data by how close they
are to the center of these boxes. Then returns the quantiles of this weighted data.
Quantiles are defined so that the values returned are always equal to a y-value in
the data - there is no interpolation. Extremal points are given their full
weighting, meaning this will not agree with np.quantile under uniform weighting
(which effectively gives 0 weight to min and max values)
The weighting of a point at :math:`x` for a window centered at :math:`x_0` is:
.. math::
w = \\frac{1}{1 + \\left (\\frac{x - x_0}{ \\text{box_length} \\times \\text{decay_length_factor} } \\right)^2}
Parameters
----------
xs : np.ndarray, :obj:`pd.Series`
The x co-ordinates to use in the regression.
ys : np.ndarray, :obj:`pd.Series`
The y co-ordinates to use in the regression.
quantiles : list-like
The quantiles to calculate in each window
nwindows : int
How many points to evaluate between x_max and x_min. Must be > 1.
decay_length_factor : float
gives the distance over which the weighting of the values falls to 1/4,
relative to half the distance between window centres. Defaults to 1. Formula is
:math:`w = \\left ( 1 + \\left( \\frac{\\text{distance}}{\\text{box_length} \\times \\text{decay_length_factor}} \\right)^2 \\right)^{-1}`.
Returns
-------
:obj:`pd.DataFrame`
Quantile values at the window centres.
Raises
------
AssertionError
``xs`` and ``ys`` don't have the same shape
"""
if xs.shape != ys.shape:
raise AssertionError("`xs` and `ys` must be the same shape")
if isinstance(quantiles, (float, np.float64)):
quantiles = [quantiles]
# min(xs) == max(xs) cannot be accessed via QRW cruncher, as a short-circuit appears
# earlier in the code.
if np.equal(max(xs), min(xs)):
# We must prevent singularity behaviour if all the points have the same x.
window_centers = np.array([xs[0]])
decay_length = 1
if np.equal(max(ys), min(ys)):
return pd.DataFrame(index=window_centers, columns=quantiles, data=ys[0])
else:
# We want to include the max x point, but not any point above it.
# The 0.99 factor prevents rounding error inclusion.
step = (max(xs) - min(xs)) / (nwindows - 1)
decay_length = step / 2 * decay_length_factor
window_centers = np.arange(min(xs), max(xs) + step * 0.99, step)
ys, xs = map(np.array, zip(*sorted(zip(ys, xs))))
results = pd.DataFrame(index=window_centers, columns=quantiles)
results.columns.name = "window_centers"
for window_center in window_centers:
weights = 1.0 / (1.0 + ((xs - window_center) / decay_length) ** 2)
weights /= sum(weights)
# We want to calculate the weights at the midpoint of step
# corresponding to the y-value.
cumsum_weights = np.cumsum(weights) - 0.5 * weights
results.loc[window_center, quantiles] = scipy.interpolate.interp1d(
cumsum_weights,
ys,
bounds_error=False,
fill_value=(ys[0], ys[-1]),
assume_sorted=True,
)(quantiles)
return results
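# Minimal usage sketch (the input arrays below are illustrative, not from the source):
# _xs = np.arange(11.0)
# _ys = 2.0 * _xs
# _quantile_df = rolling_window_find_quantiles(_xs, _ys, quantiles=[0.1, 0.5, 0.9], nwindows=5)
# _quantile_df is indexed by the 5 window centres, with one column per requested quantile.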
def calc_all_emissions_correlations(emms_df, years, output_dir):
"""
Save csv files of the correlation coefficients and the rank correlation
coefficients between emissions at specified times.
This function includes all undivided emissions (i.e. results recorded as
`Emissions|X`) and CO2 emissions split once (i.e. `Emissions|CO2|X`). It does not
include Kyoto gases. It will also save the average absolute value of the
coefficients.
Parameters
----------
emms_df : :obj:`pyam.IamDataFrame`
The database to search for correlations between named values
output_dir : str
The folder location to save the files.
years : list[int]
The years upon which to calculate correlations.
Files created
-------------
"variable_counts.csv" : the number of scenario/model pairs where the emissions
data occurs.
"gases_correlation_{year}.csv" : The Pearson's correlation between gases emissions
in a given year.
"gases_rank_correlation_{year}.csv" : The Spearman's rank correlation between
gases in a given year
"time_av_absolute_correlation_{}_to_{}.csv" : The magnitude of the Pearson's
correlation between emissions, averaged over the years requested.
"time_av_absolute_rank_correlation_{}_to_{}.csv" : The magnitude of the Spearman's
rank correlation between emissions, averaged over the years requested.
"time_variance_rank_correlation_{}_to_{}.csv" : The variance over time in the rank
correlation values above.
"""
assert len(emms_df.regions()) == 1, "Calculation is for only one region"
# Obtain the list of gases to examine
df_gases = (
emms_df.filter(level=1)
.filter(variable="Emissions|*")
.filter(variable="Emissions|Kyoto*", keep=False)
.append(emms_df.filter(level=2).filter(variable="Emissions|CO2*"))
.variables(True)
.set_index("variable")
)
all_correlations_df = pd.DataFrame(
index=df_gases.index, columns=df_gases.index, data=0
)
all_rank_corr_df = pd.DataFrame(
index=df_gases.index, columns=df_gases.index, data=0
)
all_rank_corr_var_df = pd.DataFrame(
index=df_gases.index, columns=df_gases.index, data=0
)
# Calculate the total amount of data
var_count_file = "variable_counts.csv"
var_count =
|
pd.Series(index=df_gases.index, dtype=int)
|
pandas.Series
|
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
import matplotlib as mpl
mpl.rcParams['font.sans-serif'] = ['STHeiti']
mpl.rcParams['font.serif'] = ['STHeiti']
import seaborn as sns
sns.set_style("darkgrid",{"font.sans-serif":['STHeiti', 'STHeiti']})
import sys
stdout = sys.stdout
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = stdout
rate = pd.read_csv('./data/mixed_funds_rate.csv')
rate['date'] = pd.to_datetime(rate['date'])
rate.set_index('date',inplace=True)
rate.head()
years = np.arange(2001,2018)
funds_nums_with_data = []
for year in years:
data = rate[str(year)]
max_days_with_data = data.notnull().sum().max()
funds_nums_with_data.append((data.notnull().sum() == max_days_with_data).sum())
year_count = pd.DataFrame()
year_count['nums'] = funds_nums_with_data
year_count['year'] = years.astype(str)
year_count['year'] = pd.to_datetime(year_count['year'])
year_count.set_index('year',inplace=True)
ax = year_count.plot(kind='bar')
xtl=[item.get_text()[:4] for item in ax.get_xticklabels()]
ax.set_xticklabels(xtl)
plt.gcf().autofmt_xdate()
df = rate['2016':'2017'].dropna(how='all')
df.notnull().sum().plot(ls='None',marker='.')
df400 = df[df.columns[df.notnull().sum()>400]].fillna(0)
df400.sort_index(ascending=True,inplace=True)
df400.to_csv('data/two_years_data.csv')
rm = df400.mean(axis=1)
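# Compound the equal-weighted average daily returns (quoted in percent) into a cumulative return series: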
returns_mean = (rm/100+1).cumprod()-1
hs300 =
|
pd.read_csv('./data/hs300.csv',header=None,names={'date','hs300'})
|
pandas.read_csv
|
# Import required modules
import pandas as pd
import cfbd
import time
# Import files
import write_to_db
##########################################
year = 2009
##########################################
# Configure API key for College Football Data API
cfbd_config = cfbd.Configuration()
cfbd_config.api_key[
"Authorization"
] = "<KEY>"
cfbd_config.api_key_prefix["Authorization"] = "Bearer"
teams_api = cfbd.TeamsApi(cfbd.ApiClient(cfbd_config))
games_api = cfbd.GamesApi(cfbd.ApiClient(cfbd_config))
# Build out DataFrame with basic team info
teams = teams_api.get_fbs_teams()
# teams_df = pd.DataFrame.from_records(
# [
# dict(
# id=t.id,
# team=t.school,
# conference=t.conference,
# division=t.division,
# city=t.location["city"],
# state=t.location["state"],
# )
# for t in teams
# ]
# )
test_df =
|
pd.DataFrame.from_dict({"team": ["Alabama"]})
|
pandas.DataFrame.from_dict
|
# Copyright 2020-2021 AstroLab Software
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from flask import request, jsonify, Response
from flask import send_file
from PIL import Image as im
from matplotlib import cm
from app import client
from app import clientP128, clientP4096, clientP131072
from app import clientT, clientS
from app import clientSSO, clientTNS
from app import clientU, clientUV, nlimit
from app import APIURL
from apps.utils import format_hbase_output
from apps.utils import extract_cutouts
from apps.utils import get_superpixels
from apps.plotting import legacy_normalizer, convolve, sigmoid_normalizer
import io
import requests
import java
import gzip
import healpy as hp
import pandas as pd
import numpy as np
import astropy.units as u
from astropy.time import Time, TimeDelta
from astropy.coordinates import SkyCoord
from astropy.table import Table
from astropy.io import fits
from flask import Blueprint
api_bp = Blueprint('', __name__)
api_doc_summary = """
# Fink API
## Summary of services
| HTTP Method | URI | Action | Availability |
|-------------|-----|--------|--------------|
| POST/GET | {}/api/v1/objects| Retrieve single object data from the Fink database | ☑️ |
| POST/GET | {}/api/v1/explorer | Query the Fink alert database | ☑️ |
| POST/GET | {}/api/v1/latests | Get latest alerts by class | ☑️ |
| POST/GET | {}/api/v1/sso | Get Solar System Object data | ☑️ |
| POST/GET | {}/api/v1/cutouts | Retrieve cutout data from the Fink database| ☑️ |
| POST/GET | {}/api/v1/xmatch | Cross-match user-defined catalog with Fink alert data| ☑️ |
| POST/GET | {}/api/v1/bayestar | Cross-match LIGO/Virgo sky map with Fink alert data| ☑️ |
| GET | {}/api/v1/classes | Display all Fink derived classification | ☑️ |
| GET | {}/api/v1/columns | Display all available alert fields and their type | ☑️ |
""".format(APIURL, APIURL, APIURL, APIURL, APIURL, APIURL, APIURL, APIURL, APIURL)
api_doc_object = """
## Retrieve single object data
The list of arguments for retrieving object data can be found at https://fink-portal.org/api/v1/objects.
In a unix shell, you would simply use
```bash
# Get data for ZTF21aaxtctv and save it in a CSV file
curl -H "Content-Type: application/json" -X POST -d '{"objectId":"ZTF21aaxtctv", "output-format":"csv"}' https://fink-portal.org/api/v1/objects -o ZTF21aaxtctv.csv
```
In python, you would use
```python
import requests
import pandas as pd
# get data for ZTF21aaxtctv
r = requests.post(
'https://fink-portal.org/api/v1/objects',
json={
'objectId': 'ZTF21aaxtctv',
'output-format': 'json'
}
)
# Format output in a DataFrame
pdf = pd.read_json(r.content)
```
Note that for `csv` output, you need to use
```python
# get data for ZTF21aaxtctv in CSV format...
r = ...
pd.read_csv(io.BytesIO(r.content))
```
You can also get a votable using the json output format:
```python
from astropy.table import Table
# get data for ZTF21aaxtctv in JSON format...
r = ...
t = Table(r.json())
```
By default, we transfer all available data fields (original ZTF fields and Fink science module outputs).
But you can also choose to transfer only a subset of the fields:
```python
# select only jd, and magpsf
r = requests.post(
'https://fink-portal.org/api/v1/objects',
json={
'objectId': 'ZTF21aaxtctv',
'columns': 'i:jd,i:magpsf'
}
)
```
Note that the fields should be comma-separated. Unknown field names are ignored.
### Upper limits and bad quality data
You can also retrieve upper limits and bad quality data (as defined by Fink quality cuts)
alongside valid measurements. For this you would use `withupperlim` (see usage below).
Note that the returned data will contain a new column, `d:tag`, so you can easily check the data type:
`valid` (valid alert measurements), `upperlim` (upper limits), `badquality` (alert measurements that did not pass quality cuts).
Here is an example that queries the data and plots it:
```python
import requests
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('talk')
# get data for ZTF21aaxtctv
r = requests.post(
'https://fink-portal.org/api/v1/objects',
json={
'objectId': 'ZTF21aaxtctv',
'withupperlim': 'True'
}
)
# Format output in a DataFrame
pdf = pd.read_json(r.content)
fig = plt.figure(figsize=(15, 6))
colordic = {1: 'C0', 2: 'C1'}
for filt in np.unique(pdf['i:fid']):
maskFilt = pdf['i:fid'] == filt
# The column `d:tag` is used to check data type
maskValid = pdf['d:tag'] == 'valid'
plt.errorbar(
pdf[maskValid & maskFilt]['i:jd'].apply(lambda x: x - 2400000.5),
pdf[maskValid & maskFilt]['i:magpsf'],
pdf[maskValid & maskFilt]['i:sigmapsf'],
ls = '', marker='o', color=colordic[filt]
)
maskUpper = pdf['d:tag'] == 'upperlim'
plt.plot(
pdf[maskUpper & maskFilt]['i:jd'].apply(lambda x: x - 2400000.5),
pdf[maskUpper & maskFilt]['i:diffmaglim'],
ls='', marker='^', color=colordic[filt], markerfacecolor='none'
)
maskBadquality = pdf['d:tag'] == 'badquality'
plt.errorbar(
pdf[maskBadquality & maskFilt]['i:jd'].apply(lambda x: x - 2400000.5),
pdf[maskBadquality & maskFilt]['i:magpsf'],
pdf[maskBadquality & maskFilt]['i:sigmapsf'],
ls='', marker='v', color=colordic[filt]
)
plt.gca().invert_yaxis()
plt.xlabel('Modified Julian Date')
plt.ylabel('Magnitude')
plt.show()
```

### Cutouts
Finally, you can also request data from cutouts stored in alerts (science, template and difference).
Simply set `withcutouts` in the json payload (string):
```python
import requests
import pandas as pd
import matplotlib.pyplot as plt
# transfer cutout data
r = requests.post(
'https://fink-portal.org/api/v1/objects',
json={
'objectId': 'ZTF21aaxtctv',
'withcutouts': 'True'
}
)
# Format output in a DataFrame
pdf = pd.read_json(r.content)
columns = [
'b:cutoutScience_stampData',
'b:cutoutTemplate_stampData',
'b:cutoutDifference_stampData'
]
for col in columns:
    # 2D array stored in the alert
    data = pdf[col].values[0]
    # plot it; flip the array to get the correct orientation on sky
    plt.figure()
    plt.imshow(data[::-1], cmap='gray')
plt.show()
```
See [here](https://github.com/astrolabsoftware/fink-science-portal/blob/1dea22170449f120d92f404ac20bbb856e1e77fc/apps/plotting.py#L584-L593) how we do in the Science Portal to display cutouts.
Note that you need to flip the array to get the correct orientation on sky (`data[::-1]`).
"""
api_doc_explorer = """
## Query the Fink alert database
This service allows you to search matching objects in the database.
If several alerts from the same object match the query, we group information and
only display the data from the last alert. To get a full history about an object,
you should use the `Retrieve single object data` service instead.
Currently, you cannot query using several conditions.
You must choose among `Search by Object ID` (group 0), `Conesearch` (group 1), or `Search by Date` (group 2).
In a future release, you will be able to combine searches.
The list of arguments for querying the Fink alert database can be found at https://fink-portal.org/api/v1/explorer.
### Search by Object ID
Enter a valid object ID to access its data, e.g. try:
* ZTF21abfmbix, ZTF21aaxtctv, ZTF21abfaohe, ZTF20aanxcpf, ZTF17aaaabte, ZTF18aafpcwm, ZTF21abujbqa, ZTF21abuipwb, ZTF18acuajcr
In a unix shell, you would simply use
```bash
# Get data for ZTF21aaxtctv and save it in a JSON file
curl -H "Content-Type: application/json" -X POST -d '{"objectId":"ZTF21aaxtctv"}' https://fink-portal.org/api/v1/explorer -o search_ZTF21aaxtctv.json
```
In python, you would use
```python
import requests
import pandas as pd
# get data for ZTF21aaxtctv
r = requests.post(
'https://fink-portal.org/api/v1/explorer',
json={
'objectId': 'ZTF21aaxtctv',
}
)
# Format output in a DataFrame
pdf = pd.read_json(r.content)
```
### Conesearch
Perform a conesearch around a position on the sky given by (RA, Dec, radius).
The initializer for RA/Dec is very flexible and supports inputs provided in a number of convenient formats.
The following ways of initializing a conesearch are all equivalent (radius in arcsecond):
* 193.822, 2.89732, 5
* 193d49m18.267s, 2d53m50.35s, 5
* 12h55m17.218s, +02d53m50.35s, 5
* 12 55 17.218, +02 53 50.35, 5
* 12:55:17.218, 02:53:50.35, 5
In a unix shell, you would simply use
```bash
# Get all objects falling within (center, radius) = ((ra, dec), radius)
curl -H "Content-Type: application/json" -X POST -d '{"ra":"193.822", "dec":"2.89732", "radius":"5"}' https://fink-portal.org/api/v1/explorer -o conesearch.json
```
In python, you would use
```python
import requests
import pandas as pd
# Get all objects falling within (center, radius) = ((ra, dec), radius)
r = requests.post(
'https://fink-portal.org/api/v1/explorer',
json={
'ra': '193.822',
'dec': '2.89732',
'radius': '5'
}
)
# Format output in a DataFrame
pdf = pd.read_json(r.content)
```
Maximum radius length is 18,000 arcseconds (5 degrees). Note that in case of
several objects matching, the results will be sorted according to the column
`v:separation_degree`, which is the angular separation in degree between
the input (ra, dec) and the objects found.
In addition, you can specify time boundaries:
```python
import requests
import pandas as pd
# Get all objects falling within (center, radius) = ((ra, dec), radius)
# between 2021-06-25 05:59:37.000 (included) and 2021-07-01 05:59:37.000 (excluded)
r = requests.post(
'https://fink-portal.org/api/v1/explorer',
json={
'ra': '193.822',
'dec': '2.89732',
'radius': '5',
'startdate_conesearch': '2021-06-25 05:59:37.000',
'window_days_conesearch': 7
}
)
# Format output in a DataFrame
pdf = pd.read_json(r.content)
```
Here is the performance of the service for querying a
single object (database of 1.3TB, about 40 million alerts):

_circle marks with dashed lines are results for a full scan search
(~2 years of data, 40 million alerts), while the upper triangles with
dotted lines are results when restricting the search to 7 days.
The numbers close to markers show the number of objects returned by the conesearch._
### Search by Date
Choose a starting date and a time window to see all alerts in this period.
Dates are in UTC, and the time window in minutes.
Example of valid search:
* 2021-07-01 05:59:37.000
In a unix shell, you would simply use
```bash
# Get all objects between 2021-07-01 05:59:37.000 and 2021-07-01 06:09:37.000 UTC
curl -H "Content-Type: application/json" -X POST -d '{"startdate":"2021-07-01 05:59:37.000", "window":"10"}' https://fink-portal.org/api/v1/explorer -o datesearch.json
```
In python, you would use
```python
import requests
import pandas as pd
# Get all objects between 2021-07-01 05:59:37.000 and 2021-07-01 06:09:37.000 UTC
r = requests.post(
'https://fink-portal.org/api/v1/explorer',
json={
'startdate': '2021-07-01 05:59:37.000',
'window': '10'
}
)
# Format output in a DataFrame
pdf = pd.read_json(r.content)
```
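The returned `i:jd` column is in Julian Date. If you prefer human-readable times, here is a small
sketch using astropy (the `v:iso_date` column name is just an example):
```python
from astropy.time import Time

# Convert the Julian Date column into ISO formatted strings
pdf['v:iso_date'] = Time(pdf['i:jd'].values, format='jd').iso
```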
"""
api_doc_latests = """
## Get latest alerts by class
The list of arguments for getting latest alerts by class can be found at https://fink-portal.org/api/v1/latests.
The list of Fink class can be found at https://fink-portal.org/api/v1/classes
```bash
# Get list of available class in Fink
curl -H "Content-Type: application/json" -X GET https://fink-portal.org/api/v1/classes -o finkclass.json
```
To get the last 5 candidates of the class `Early SN Ia candidate`, you would simply use in a unix shell:
```bash
# Get latests 5 Early SN Ia candidates
curl -H "Content-Type: application/json" -X POST -d '{"class":"Early SN Ia candidate", "n":"5"}' https://fink-portal.org/api/v1/latests -o latest_five_sn_candidates.json
```
In python, you would use
```python
import requests
import pandas as pd
# Get latests 5 Early SN Ia candidates
r = requests.post(
'https://fink-portal.org/api/v1/latests',
json={
'class': 'Early SN Ia candidate',
'n': '5'
}
)
# Format output in a DataFrame
pdf = pd.read_json(r.content)
```
Note that for `csv` output, you need to use
```python
# get latests in CSV format...
r = ...
pd.read_csv(io.BytesIO(r.content))
```
You can also specify `startdate` and `stopdate` for your search:
```python
import requests
import pandas as pd
# Get all classified SN Ia from TNS between March 1st 2021 and March 5th 2021
r = requests.post(
'https://fink-portal.org/api/v1/latests',
json={
'class': '(TNS) SN Ia',
'n': '100',
'startdate': '2021-03-01',
'stopdate': '2021-03-05'
}
)
# Format output in a DataFrame
pdf = pd.read_json(r.content)
```
There is no time limit, but you will be limited by the
number of alerts retrieved on the server side, set by `n` (current max is 1000).
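If you need more alerts than that over a long period, a simple workaround (a sketch using only the
documented parameters, not a dedicated endpoint feature) is to split the period into consecutive
date windows and concatenate the results:
```python
import requests
import pandas as pd

# Query one week at a time and concatenate (each call is capped at n=1000)
dates = pd.date_range('2021-03-01', '2021-03-31', freq='7D')

frames = []
for start, stop in zip(dates[:-1], dates[1:]):
    r = requests.post(
        'https://fink-portal.org/api/v1/latests',
        json={
            'class': 'Early SN Ia candidate',
            'n': '1000',
            'startdate': str(start),
            'stopdate': str(stop)
        }
    )
    frames.append(pd.read_json(r.content))

pdf = pd.concat(frames, ignore_index=True)
```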
"""
api_doc_sso = """
## Retrieve Solar System Object data
The list of arguments for retrieving SSO data can be found at https://fink-portal.org/api/v1/sso.
The numbers or designations are taken from the MPC archive.
When searching for a particular asteroid or comet, it is best to use the IAU number,
as in 4209 for asteroid "4209 Briggs". You can also try a numbered comet (e.g. 10P),
or an interstellar object (none so far...). If the number does not exist yet, you can search by designation.
Here are some examples of valid queries:
* Asteroids by number (default)
* Asteroids (Main Belt): 4209, 1922
* Asteroids (Hungarians): 18582, 77799
* Asteroids (Jupiter Trojans): 4501, 1583
* Asteroids (Mars Crossers): 302530
* Asteroids by designation (if number does not exist yet)
* 2010JO69, 2017AD19, 2012XK111
* Comets by number (default)
* 10P, 249P, 124P
* Comets by designation (if the number does not exist yet)
* C/2020V2, C/2020R2
Note that for designations, you can also use a space (2010 JO69 or C/2020 V2).
In a unix shell, you would simply use
```bash
# Get data for the asteroid 4209 and save it in a CSV file
curl -H "Content-Type: application/json" -X POST -d '{"n_or_d":"4209", "output-format":"csv"}' https://fink-portal.org/api/v1/sso -o 4209.csv
```
In python, you would use
```python
import requests
import pandas as pd
# get data for the asteroid 4209
r = requests.post(
'https://fink-portal.org/api/v1/sso',
json={
'n_or_d': '4209',
'output-format': 'json'
}
)
# Format output in a DataFrame
pdf = pd.read_json(r.content)
```
Note that for `csv` output, you need to use
```python
# get data for asteroid 4209 in CSV format...
r = ...
pd.read_csv(io.BytesIO(r.content))
```
You can also get a votable using the json output format:
```python
from astropy.table import Table
# get data for asteroid 4209 in JSON format...
r = ...
t = Table(r.json())
```
By default, we transfer all available data fields (original ZTF fields and Fink science module outputs).
But you can also choose to transfer only a subset of the fields:
```python
# select only jd, and magpsf
r = requests.post(
'https://fink-portal.org/api/v1/sso',
json={
'n_or_d': '4209',
'columns': 'i:jd,i:magpsf'
}
)
```
Note that the fields should be comma-separated. Unknown field names are ignored.
"""
api_doc_cutout = """
## Retrieve cutout data from the Fink database
The list of arguments for retrieving cutout data can be found at https://fink-portal.org/api/v1/cutouts.
### PNG
In a unix shell, you can retrieve the last cutout of an object by simply using
```bash
curl -H "Content-Type: application/json" \\
-X POST -d \\
'{"objectId":"ZTF21aaxtctv", "kind":"Science"}' \\
https://fink-portal.org/api/v1/cutouts -o cutoutScience.png
```
This will retrieve the `Science` image and save it as `cutoutScience.png`.
In Python, the equivalent script would be:
```python
import io
import requests
from PIL import Image as im
# get data for ZTF21aaxtctv
r = requests.post(
'https://fink-portal.org/api/v1/cutouts',
json={
'objectId': 'ZTF21aaxtctv',
'kind': 'Science',
}
)
image = im.open(io.BytesIO(r.content))
image.save('cutoutScience.png')
```
Note that you can choose between the `Science`, `Template`, or `Difference` images.
You can also customise the image processing:
```python
import io
import requests
from PIL import Image as im
# get data for ZTF21aaxtctv
r = requests.post(
'https://fink-portal.org/api/v1/cutouts',
json={
'objectId': 'ZTF21aaxtctv',
'kind': 'Science', # Science, Template, Difference
'stretch': 'sigmoid', # sigmoid[default], linear, sqrt, power, log, asinh
'colormap': 'viridis', # Valid matplotlib colormap name (see matplotlib.cm). Default is grayscale.
'pmin': 0.5, # The percentile value used to determine the pixel value of minimum cut level. Default is 0.5. No effect for sigmoid.
'pmax': 99.5, # The percentile value used to determine the pixel value of maximum cut level. Default is 99.5. No effect for sigmoid.
'convolution_kernel': 'gauss' # Convolve the image with a kernel (gauss or box). Default is None (not specified).
}
)
image = im.open(io.BytesIO(r.content))
image.save('mysupercutout.png')
```
By default, you will retrieve the cutout of the last alert emitted for the object `objectId`.
You can also access cutouts of other alerts from this object by specifying their candidate ID:
```python
import io
import requests
import pandas as pd
from PIL import Image as im
# Get all candidate ID with JD for ZTF21aaxtctv
r = requests.post(
'https://fink-portal.org/api/v1/objects',
json={
'objectId': 'ZTF21aaxtctv',
'columns': 'i:candid,i:jd'
}
)
pdf_candid = pd.read_json(r.content)
# Get the first alert
first_alert = pdf_candid['i:candid'].values[-1]
# get data for ZTF21aaxtctv
r = requests.post(
'https://fink-portal.org/api/v1/cutouts',
json={
'objectId': 'ZTF21aaxtctv',
'kind': 'Science',
'candid': first_alert
}
)
image = im.open(io.BytesIO(r.content))
image.save('mysupercutout_firstalert.png')
```
### FITS
You can also retrieve the original FITS file stored in the alert:
```bash
curl -H "Content-Type: application/json" \\
-X POST -d \\
'{"objectId":"ZTF21aaxtctv", "kind":"Science", "output-format": "FITS"}' \\
https://fink-portal.org/api/v1/cutouts -o cutoutScience.fits
```
or equivalently in Python:
```python
import io
from astropy.io import fits
import requests
import pandas as pd
# get data for ZTF21aaxtctv
r = requests.post(
'https://fink-portal.org/api/v1/cutouts',
json={
'objectId': 'ZTF21aaxtctv',
'kind': 'Science',
'output-format': 'FITS'
}
)
data = fits.open(io.BytesIO(r.content))
data.writeto('cutoutScience.fits')
```
### Numpy array
You can also retrieve only the data block stored in the alert:
```python
import requests
import pandas as pd
# get data for ZTF21aaxtctv
r = requests.post(
'https://fink-portal.org/api/v1/cutouts',
json={
'objectId': 'ZTF21aaxtctv',
'kind': 'Science',
'output-format': 'array'
}
)
pdf = pd.read_json(r.content)
array = pdf['b:cutoutScience_stampData'].values[0]
```
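As for the cutouts returned by the `objects` service, you may need to flip the stamp to get the
correct orientation on sky. A minimal plotting sketch:
```python
import numpy as np
import matplotlib.pyplot as plt

# Flip the 2D stamp to get the correct orientation on sky
plt.imshow(np.array(array)[::-1], cmap='gray')
plt.show()
```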
"""
api_doc_xmatch = """
## Xmatch with catalogs
The list of arguments for retrieving object data can be found at https://fink-portal.org/api/v1/xmatch.
Let's assume you have a catalog on disk (CSV format), you would use:
```python
import requests
import pandas as pd
r = requests.post(
'https://fink-portal.org/api/v1/xmatch',
json={
'catalog': open('mycatalog.csv').read(),
'header': 'RA,Dec,ID',
'radius': 1.5, # in arcsecond
'window': 7 # in days
}
)
# Format output in a DataFrame
pdf = pd.read_json(r.content)
```
The crossmatch service is a wrapper around the conesearch service.
Here is the current performance of the service for querying a
single object (1.3TB, about 40 million alerts):

_circle marks with dashed lines are results for a full scan search
(~2 years of data, 40 million alerts), while the upper triangles with
dotted lines are results when restricting the search to 7 days.
The numbers close to markers show the number of objects returned by the conesearch._
The catalog format must be CSV, and it is assumed that the first line is the header,
and then each line is an object, e.g.
```
ID,Time,RA,Dec,otherproperty
210430A,2021-04-30 10:42:10,57.185,45.080,toto
210422A,2021-04-22 17:47:10,21.077,42.100,tutu
210421B,2021-04-21 10:54:44,270.817,56.828,tutu
210421A,2021-04-21 00:27:30,104.882,4.928,toto
210420B,2021-04-20 18:34:37,254.313,42.558,foo
210419C,2021-04-19 23:27:49,212.969,36.011,bar
AnObjectMatching,2019-11-02 02:51:12.001,271.3914265,45.2545134,foo
```
The argument `header` is the comma-separated names of the columns matching
RA, Dec, ID and Time (in this order). So if your catalog header is
```
aproperty,myID,detection time,RA(J2000),Dec(J2000),otherproperty
x,210430A,2021-04-30 10:42:10,57.185,45.080,toto
y,210422A,2021-04-22 17:47:10,21.077,42.100,tutu
```
You would specify:
```python
'header': 'RA(J2000),Dec(J2000),myID,detection time'
```
Note that the `Time` column is optional. You do not need to specify it,
in which case your header argument will be:
```python
'header': 'RA(J2000),Dec(J2000),myID'
```
Note that it is always better to specify the time column, as it speeds up
the computation (instead of performing a full scan). If you specify the `Time`
column, you can also set the time `window` in days around which we should perform
the cross-match (default is 1 day starting from the time column).
Finally, you can specify the `radius` for the cross-match, in arcsecond. You can
specify any values, with a maximum of 18,000 arcseconds (5 degrees).
Note that in case of several objects matching, the results will be sorted
according to the column `v:separation_degree`, which is the angular separation
in degree between the input (ra, dec) and the objects found.
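Putting it together, a request combining the custom header above with a radius and a time window
would look like this (a sketch using only the documented parameters; the file name is just an example):
```python
import requests
import pandas as pd

r = requests.post(
    'https://fink-portal.org/api/v1/xmatch',
    json={
        'catalog': open('mycatalog.csv').read(),
        'header': 'RA(J2000),Dec(J2000),myID,detection time',
        'radius': 1.5,  # in arcsecond
        'window': 7     # in days, around the time column
    }
)
# Format output in a DataFrame
pdf = pd.read_json(r.content)
```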
"""
api_doc_bayestar = """
## Cross-match with LIGO/Virgo sky maps
The list of arguments for retrieving object data can be found at https://fink-portal.org/api/v1/bayestar.
Let's assume you want to get all alerts falling inside a given LIGO/Virgo credible region sky map
(retrieved from the GraceDB event page, or distributed via GCN). You would
simply upload the sky map with a threshold, and Fink returns all alerts emitted
within `[-1 day, +6 day]` from the GW event inside the chosen credible region.
Concretely on [S200219ac](https://gracedb.ligo.org/superevents/S200219ac/view/):
```python
import requests
import pandas as pd

# LIGO/Virgo probability sky maps, as gzipped FITS (bayestar.fits.gz)
# S200219ac on 2020-02-19T09:44:15.197173
fn = 'bayestar.fits.gz'
# GW credible region threshold to look for. Note that the values in the resulting
# credible level map vary inversely with probability density: the most probable pixel is
# assigned to the credible level 0.0, and the least likely pixel is assigned the credible level 1.0.
# Area of the 20% Credible Region:
credible_level = 0.2
# Query Fink
data = open(fn, 'rb').read()
r = requests.post(
'https://fink-portal.org/api/v1/bayestar',
json={
'bayestar': str(data),
'credible_level': credible_level,
'output-format': 'json'
}
)
pdf = pd.read_json(r.content)
```
You will get a Pandas DataFrame as usual, with all alerts inside the region (within `[-1 day, +6 day]`).
Here are some statistics on this specific event:
```markdown
| `credible_level` | Sky area | number of alerts returned | Execution time |
|-----------|----------|---------------------------|----------------------|
| 0.2 | 81 deg2 | 121 | 2 to 5 seconds |
| 0.5 | 317 deg2 | 1137 | 10 to 15 seconds|
| 0.9 | 1250 deg2 | 2515 | > 60 seconds |
```
Here are the details of the alert classification for a credible level of 0.9:
```
5968 alerts found
v:classification
Unknown 2122
Solar System candidate 2058
QSO 703
SN candidate 259
RRLyr 253
Solar System MPC 172
Seyfert_1 118
EB* 105
Ambiguous 24
Blue 19
Star 18
Galaxy 15
BLLac 12
Radio 10
Candidate_RRLyr 10
SN 8
Seyfert_2 6
PulsV*delSct 5
BClG 5
AGN 5
LPV* 4
EB*Algol 4
RadioG 3
CataclyV* 3
QSO_Candidate 2
X 2
BlueStraggler 2
Candidate_EB* 2
LINER 2
GravLensSystem 2
PM* 2
GinCl 1
EllipVar 1
AMHer 1
Early SN Ia candidate 1
HB* 1
DwarfNova 1
Possible_G 1
Candidate_CV* 1
Nova 1
BYDra 1
WD* 1
Mira 1
low-mass* 1
```
Most of the alerts are actually catalogued. Finally, you can overplot alerts on the sky map:
```python
import healpy as hp
import matplotlib.pyplot as plt
hpx, header_ = hp.read_map(fn, h=True, field=0)
header = {i[0]: i[1] for i in header_}
title = 'Probability sky maps for {}'.format(header['OBJECT'])
hp.mollzoom(hpx, coord='G', title=title)
if len(pdf) > 0:
hp.projscatter(
pdf['i:ra'],
pdf['i:dec'],
lonlat=True,
marker='x',
color='C1',
alpha=0.5
)
hp.graticule()
plt.show()
```

"""
def layout(is_mobile):
if is_mobile:
width = '95%'
else:
width = '80%'
layout_ = html.Div(
[
html.Br(),
html.Br(),
html.Br(),
dbc.Container(
[
dbc.Row(
[
dbc.Card(
dbc.CardBody(
dcc.Markdown(api_doc_summary)
), style={
'backgroundColor': 'rgb(248, 248, 248, .7)'
}
),
]
),
html.Br(),
dbc.Tabs(
[
dbc.Tab(
[
dbc.Card(
dbc.CardBody(
dcc.Markdown(api_doc_object)
), style={
'backgroundColor': 'rgb(248, 248, 248, .7)'
}
),
], label="Retrieve object data"
),
dbc.Tab(
[
dbc.Card(
dbc.CardBody(
dcc.Markdown(api_doc_explorer)
), style={
'backgroundColor': 'rgb(248, 248, 248, .7)'
}
),
], label="Query the database"
),
dbc.Tab(
[
dbc.Card(
dbc.CardBody(
dcc.Markdown(api_doc_latests)
), style={
'backgroundColor': 'rgb(248, 248, 248, .7)'
}
),
], label="Get latest alerts"
),
dbc.Tab(
[
dbc.Card(
dbc.CardBody(
dcc.Markdown(api_doc_sso)
), style={
'backgroundColor': 'rgb(248, 248, 248, .7)'
}
),
], label="Get Solar System Objects"
),
dbc.Tab(
[
dbc.Card(
dbc.CardBody(
dcc.Markdown(api_doc_cutout)
), style={
'backgroundColor': 'rgb(248, 248, 248, .7)'
}
),
], label="Get Image data"
),
dbc.Tab(
[
dbc.Card(
dbc.CardBody(
dcc.Markdown(api_doc_xmatch)
), style={
'backgroundColor': 'rgb(248, 248, 248, .7)'
}
),
], label="Xmatch"
),
dbc.Tab(
[
dbc.Card(
dbc.CardBody(
dcc.Markdown(api_doc_bayestar)
), style={
'backgroundColor': 'rgb(248, 248, 248, .7)'
}
),
], label="Gravitational Waves"
),
]
)
], className="mb-8", fluid=True, style={'width': width}
)
], className='home', style={
'background-image': 'linear-gradient(rgba(255,255,255,0.5), rgba(255,255,255,0.5)), url(/assets/background.png)',
'background-size': 'contain'
}
)
return layout_
args_objects = [
{
'name': 'objectId',
'required': True,
'description': 'ZTF Object ID'
},
{
'name': 'withupperlim',
'required': False,
        'description': 'If True, also retrieve upper limit and bad quality measurements. Use the column `d:tag` in your results: valid, upperlim, badquality.'
},
{
'name': 'withcutouts',
'required': False,
        'description': 'If True, also retrieve uncompressed FITS cutout data (2D array).'
},
{
'name': 'columns',
'required': False,
'description': 'Comma-separated data columns to transfer. Default is all columns. See {}/api/v1/columns for more information.'.format(APIURL)
},
{
'name': 'output-format',
'required': False,
'description': 'Output format among json[default], csv, parquet'
}
]
args_explorer = [
{
'name': 'objectId',
'required': True,
'group': 0,
'description': 'ZTF Object ID'
},
{
'name': 'ra',
'required': True,
'group': 1,
'description': 'Right Ascension'
},
{
'name': 'dec',
'required': True,
'group': 1,
'description': 'Declination'
},
{
'name': 'radius',
'required': True,
'group': 1,
        'description': 'Conesearch radius in arcsec. Maximum is 18,000 arcseconds (5 degrees).'
},
{
'name': 'startdate_conesearch',
'required': False,
'group': 1,
'description': '[Optional] Starting date in UTC for the conesearch query.'
},
{
'name': 'window_days_conesearch',
'required': False,
'group': 1,
'description': '[Optional] Time window in days for the conesearch query.'
},
{
'name': 'startdate',
'required': True,
'group': 2,
'description': 'Starting date in UTC'
},
{
'name': 'window',
'required': True,
'group': 2,
'description': 'Time window in minutes. Maximum is 180 minutes.'
},
{
'name': 'output-format',
'required': False,
'group': None,
'description': 'Output format among json[default], csv, parquet'
}
]
args_latest = [
{
'name': 'class',
'required': True,
'description': 'Fink derived class'
},
{
'name': 'n',
'required': False,
'description': 'Last N alerts to transfer between stopping date and starting date. Default is 10, max is 1000.'
},
{
'name': 'startdate',
'required': False,
'description': 'Starting date in UTC (iso, jd, or MJD). Default is 2019-11-01 00:00:00'
},
{
'name': 'stopdate',
'required': False,
'description': 'Stopping date in UTC (iso, jd, or MJD). Default is now.'
},
{
'name': 'output-format',
'required': False,
'description': 'Output format among json[default], csv, parquet'
}
]
args_sso = [
{
'name': 'n_or_d',
'required': False,
'description': 'IAU number of the object, or designation of the object IF the number does not exist yet. Example for numbers: 4209 (asteroid) or 10P (comet). Example for designations: 2010JO69 (asteroid) or C/2020V2 (comet).'
},
{
'name': 'columns',
'required': False,
'description': 'Comma-separated data columns to transfer. Default is all columns. See {}/api/v1/columns for more information.'.format(APIURL)
},
{
'name': 'output-format',
'required': False,
'description': 'Output format among json[default], csv, parquet'
}
]
args_cutouts = [
{
'name': 'objectId',
'required': True,
'description': 'ZTF Object ID'
},
{
'name': 'kind',
'required': True,
'description': 'Science, Template, or Difference'
},
{
'name': 'output-format',
'required': False,
'description': 'PNG[default], FITS, array'
},
{
'name': 'candid',
'required': False,
        'description': 'Candidate ID of the alert belonging to the object with `objectId`. If not specified, the cutout of the latest alert is returned'
},
{
'name': 'stretch',
'required': False,
'description': 'Stretch function to be applied. Available: sigmoid[default], linear, sqrt, power, log, asinh.'
},
{
'name': 'colormap',
'required': False,
'description': 'Valid matplotlib colormap name (see matplotlib.cm). Default is grayscale.'
},
{
'name': 'pmin',
'required': False,
'description': 'The percentile value used to determine the pixel value of minimum cut level. Default is 0.5. No effect for sigmoid.'
},
{
'name': 'pmax',
'required': False,
'description': 'The percentile value used to determine the pixel value of maximum cut level. Default is 99.5. No effect for sigmoid.'
},
{
'name': 'convolution_kernel',
'required': False,
'description': 'Convolve the image with a kernel (gauss or box). Default is None (not specified).'
}
]
args_xmatch = [
{
'name': 'catalog',
'required': True,
'description': 'External catalog as CSV'
},
{
'name': 'header',
'required': True,
        'description': 'Comma-separated names of the columns corresponding to RA, Dec, ID, and Time [optional] in the input catalog.'
},
{
'name': 'radius',
'required': True,
'description': 'Conesearch radius in arcsec. Maximum is 18,000 arcseconds (5 degrees).'
},
{
'name': 'window',
'required': False,
'description': '[Optional] Time window in days.'
},
]
args_bayestar = [
{
'name': 'bayestar',
'required': True,
'description': 'LIGO/Virgo probability sky maps, as gzipped FITS (bayestar.fits.gz)'
},
{
'name': 'credible_level',
'required': True,
'description': 'GW credible region threshold to look for. Note that the values in the resulting credible level map vary inversely with probability density: the most probable pixel is assigned to the credible level 0.0, and the least likely pixel is assigned the credible level 1.0.'
},
{
'name': 'output-format',
'required': False,
'description': 'Output format among json[default], csv, parquet'
}
]
@api_bp.route('/api/v1/objects', methods=['GET'])
def return_object_arguments():
""" Obtain information about retrieving object data
"""
return jsonify({'args': args_objects})
@api_bp.route('/api/v1/objects', methods=['POST'])
def return_object():
""" Retrieve object data from the Fink database
"""
if 'output-format' in request.json:
output_format = request.json['output-format']
else:
output_format = 'json'
# Check all required args are here
required_args = [i['name'] for i in args_objects if i['required'] is True]
for required_arg in required_args:
if required_arg not in request.json:
rep = {
'status': 'error',
'text': "A value for `{}` is required. Use GET to check arguments.\n".format(required_arg)
}
return Response(str(rep), 400)
if 'columns' in request.json:
cols = request.json['columns'].replace(" ", "")
truncated = True
else:
cols = '*'
truncated = False
to_evaluate = "key:key:{}".format(request.json['objectId'])
# We do not want to perform full scan if the objectid is a wildcard
client.setLimit(1000)
results = client.scan(
"",
to_evaluate,
cols,
0, True, True
)
schema_client = client.schema()
# reset the limit in case it has been changed above
client.setLimit(nlimit)
pdf = format_hbase_output(
results, schema_client, group_alerts=False, truncated=truncated
)
if 'withcutouts' in request.json and request.json['withcutouts'] == 'True':
pdf = extract_cutouts(pdf, client)
if 'withupperlim' in request.json and request.json['withupperlim'] == 'True':
# upper limits
resultsU = clientU.scan(
"",
"{}".format(to_evaluate),
"*", 0, False, False
)
# bad quality
resultsUP = clientUV.scan(
"",
"{}".format(to_evaluate),
"*", 0, False, False
)
pdfU = pd.DataFrame.from_dict(resultsU, orient='index')
pdfUP = pd.DataFrame.from_dict(resultsUP, orient='index')
pdf['d:tag'] = 'valid'
pdfU['d:tag'] = 'upperlim'
pdfUP['d:tag'] = 'badquality'
if 'i:jd' in pdfUP.columns:
# workaround -- see https://github.com/astrolabsoftware/fink-science-portal/issues/216
mask = np.array([False if float(i) in pdf['i:jd'].values else True for i in pdfUP['i:jd'].values])
pdfUP = pdfUP[mask]
pdf_ = pd.concat((pdf, pdfU, pdfUP), axis=0)
pdf_['i:jd'] = pdf_['i:jd'].astype(float)
# replace
pdf = pdf_.sort_values('i:jd', ascending=False)
if output_format == 'json':
return pdf.to_json(orient='records')
elif output_format == 'csv':
return pdf.to_csv(index=False)
elif output_format == 'parquet':
f = io.BytesIO()
pdf.to_parquet(f)
f.seek(0)
return f.read()
rep = {
'status': 'error',
'text': "Output format `{}` is not supported. Choose among json, csv, or parquet\n".format(output_format)
}
return Response(str(rep), 400)
@api_bp.route('/api/v1/explorer', methods=['GET'])
def query_db_arguments():
""" Obtain information about querying the Fink database
"""
return jsonify({'args': args_explorer})
@api_bp.route('/api/v1/explorer', methods=['POST'])
def query_db():
""" Query the Fink database
"""
if 'output-format' in request.json:
output_format = request.json['output-format']
else:
output_format = 'json'
# Check the user specifies only one group
all_groups = [i['group'] for i in args_explorer if i['group'] is not None and i['name'] in request.json]
if len(np.unique(all_groups)) != 1:
rep = {
'status': 'error',
'text': "You need to set parameters from the same group\n"
}
return Response(str(rep), 400)
# Check the user specifies all parameters within a group
user_group = np.unique(all_groups)[0]
required_args = [i['name'] for i in args_explorer if i['group'] == user_group]
required = [i['required'] for i in args_explorer if i['group'] == user_group]
for required_arg, required_ in zip(required_args, required):
if (required_arg not in request.json) and required_:
rep = {
'status': 'error',
'text': "A value for `{}` is required for group {}. Use GET to check arguments.\n".format(required_arg, user_group)
}
return Response(str(rep), 400)
if user_group == 0:
# objectId search
to_evaluate = "key:key:{}".format(request.json['objectId'])
# Avoid a full scan
client.setLimit(1000)
results = client.scan(
"",
to_evaluate,
"*",
0, True, True
)
# reset the limit in case it has been changed above
client.setLimit(nlimit)
schema_client = client.schema()
if user_group == 1:
# Interpret user input
ra, dec = request.json['ra'], request.json['dec']
radius = request.json['radius']
if 'startdate_conesearch' in request.json:
startdate = request.json['startdate_conesearch']
else:
startdate = None
if 'window_days_conesearch' in request.json and request.json['window_days_conesearch'] is not None:
window_days = float(request.json['window_days_conesearch'])
else:
window_days = 1.0
if float(radius) > 18000.:
rep = {
'status': 'error',
'text': "`radius` cannot be bigger than 18,000 arcseconds (5 degrees).\n"
}
return Response(str(rep), 400)
if 'h' in str(ra):
coord = SkyCoord(ra, dec, frame='icrs')
elif ':' in str(ra) or ' ' in str(ra):
coord = SkyCoord(ra, dec, frame='icrs', unit=(u.hourangle, u.deg))
else:
coord = SkyCoord(ra, dec, frame='icrs', unit='deg')
ra = coord.ra.deg
dec = coord.dec.deg
radius_deg = float(radius) / 3600.
# angle to vec conversion
vec = hp.ang2vec(np.pi / 2.0 - np.pi / 180.0 * dec, np.pi / 180.0 * ra)
# Send request
if float(radius) <= 30.:
nside = 131072
clientP_ = clientP131072
elif (float(radius) > 30.) & (float(radius) <= 1000.):
nside = 4096
clientP_ = clientP4096
else:
nside = 128
clientP_ = clientP128
pixs = hp.query_disc(
nside,
vec,
np.pi / 180 * radius_deg,
inclusive=True
)
# For the future: we could set clientP_.setRangeScan(True)
# and pass directly the time boundaries here instead of
# grouping by later.
# Filter by time - logic to be improved...
if startdate is not None:
if ':' in str(startdate):
jdstart = Time(startdate).jd
elif str(startdate).startswith('24'):
jdstart = Time(startdate, format='jd').jd
else:
jdstart = Time(startdate, format='mjd').jd
jdend = jdstart + window_days
clientP_.setRangeScan(True)
results = java.util.TreeMap()
for pix in pixs:
to_search = "key:key:{}_{},key:key:{}_{}".format(pix, jdstart, pix, jdend)
result = clientP_.scan(
"",
to_search,
"*",
0, True, True
)
results.putAll(result)
else:
to_evaluate = ",".join(
[
'key:key:{}'.format(i) for i in pixs
]
)
# Get matches in the pixel index table
results = clientP_.scan(
"",
to_evaluate,
"*",
0, True, True
)
# extract objectId and times
objectids = [i[1]['i:objectId'] for i in results.items()]
times = [float(i[1]['key:key'].split('_')[1]) for i in results.items()]
pdf_ = pd.DataFrame({'oid': objectids, 'jd': times})
# Filter by time - logic to be improved...
if startdate is not None:
pdf_ = pdf_[(pdf_['jd'] >= jdstart) & (pdf_['jd'] < jdstart + window_days)]
# groupby and keep only the last alert per objectId
pdf_ = pdf_.loc[pdf_.groupby('oid')['jd'].idxmax()]
# Get data from the main table
results = java.util.TreeMap()
for oid, jd in zip(pdf_['oid'].values, pdf_['jd'].values):
to_evaluate = "key:key:{}_{}".format(oid, jd)
result = client.scan(
"",
to_evaluate,
"*",
0, True, True
)
results.putAll(result)
schema_client = client.schema()
elif user_group == 2:
if int(request.json['window']) > 180:
rep = {
'status': 'error',
'text': "`window` cannot be bigger than 180 minutes.\n"
}
return Response(str(rep), 400)
# Time to jd
jd_start = Time(request.json['startdate']).jd
jd_end = jd_start + TimeDelta(int(request.json['window']) * 60, format='sec').jd
# Send the request. RangeScan.
clientT.setRangeScan(True)
to_evaluate = "key:key:{},key:key:{}".format(jd_start, jd_end)
results = clientT.scan(
"",
to_evaluate,
"*",
0, True, True
)
schema_client = clientT.schema()
# reset the limit in case it has been changed above
client.setLimit(nlimit)
pdfs = format_hbase_output(
results,
schema_client,
group_alerts=True,
extract_color=False
)
# For conesearch, sort by distance
if (user_group == 1) and (len(pdfs) > 0):
sep = coord.separation(
SkyCoord(
pdfs['i:ra'],
pdfs['i:dec'],
unit='deg'
)
).deg
pdfs['v:separation_degree'] = sep
pdfs = pdfs.sort_values('v:separation_degree', ascending=True)
mask = pdfs['v:separation_degree'] > radius_deg
pdfs = pdfs[~mask]
if output_format == 'json':
return pdfs.to_json(orient='records')
elif output_format == 'csv':
return pdfs.to_csv(index=False)
elif output_format == 'parquet':
f = io.BytesIO()
pdfs.to_parquet(f)
f.seek(0)
return f.read()
rep = {
'status': 'error',
'text': "Output format `{}` is not supported. Choose among json, csv, or parquet\n".format(request.json['output-format'])
}
return Response(str(rep), 400)
@api_bp.route('/api/v1/latests', methods=['GET'])
def latest_objects_arguments():
""" Obtain information about latest objects
"""
return jsonify({'args': args_latest})
@api_bp.route('/api/v1/latests', methods=['POST'])
def latest_objects():
""" Get latest objects by class
"""
if 'output-format' in request.json:
output_format = request.json['output-format']
else:
output_format = 'json'
# Check all required args are here
required_args = [i['name'] for i in args_latest if i['required'] is True]
for required_arg in required_args:
if required_arg not in request.json:
rep = {
'status': 'error',
'text': "A value for `{}` is required. Use GET to check arguments.\n".format(required_arg)
}
return Response(str(rep), 400)
if 'n' not in request.json:
nalerts = 10
else:
nalerts = int(request.json['n'])
if 'startdate' not in request.json:
# start of the Fink operations
jd_start = Time('2019-11-01 00:00:00').jd
else:
jd_start = Time(request.json['startdate']).jd
if 'stopdate' not in request.json:
jd_stop = Time.now().jd
else:
jd_stop = Time(request.json['stopdate']).jd
# Search for latest alerts for a specific class
tns_classes = pd.read_csv('assets/tns_types.csv', header=None)
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
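# Illustrative note: for the i-th NA marker v, f(i, v) builds a row of nv
# comma-separated fields with v placed in column i and every other field left
# empty, e.g. for nv == 3 the rows would look like "NaN,,", ",NULL,", ",,#N/A".
# Parsing all nv rows therefore yields an nv x nv frame of NaN, matching
# `expected` below.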
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(
|
StringIO(self.ts_data)
|
pandas.compat.StringIO
|
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import (Mail, Attachment, FileContent, FileName, FileType, Disposition, ContentId)
import base64
import plotly.graph_objects as go
import os, time, csv, datetime, requests, json, matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pandas as pd
from scipy import stats
from dotenv import load_dotenv
from pandas import read_csv
all_states=["AK", "AL", "AR", "AZ", "CA", "CO", "CT", "DE", "FL", "GA", "HI", "IA", "ID", "IL", "IN", "KS", "KY", "LA", "MA", "MD", "ME", "MI", "MN", "MS", "MO", "MT", "NC", "ND", "NE", "NH", "NJ", "NM", "NV", "NY", "OH", "OK", "OR", "PA", "RI", "SC", "SD", "TN", "TX", "UT", "VA", "VT", "WA", "WI", "WV", "WY"]
#This function generates the right series_id depending on the scenario
def hashgen(state_id, dt_code):
series_id = list(range(6))
series_id[1] = (state_id) + "UR" #Unemployment by State: STD, Monthly data, June 21
series_id[2] = (state_id) + "RQGSP" #GDP by State: STD, Qtrly data, Jan 21
series_id[3] = (state_id) + "POP" #Resident population by State: STD, Annual data, Jan 20
series_id[4] = "MEDLISPRI" + (state_id) #Median Listing Price by State: STD, Monthly data, June 21
series_id[5] = "MEHOINUS" + (state_id) + "A646N" #Median Household income by State: STD, Annual data, Jan 19
return series_id[dt_code]
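# Illustrative usage (assumed): hashgen("CA", 1) -> "CAUR" (unemployment rate)
# and hashgen("CA", 4) -> "MEDLISPRICA" (median listing price), i.e. the state
# code is spliced into the FRED series-id patterns listed above.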
#This function pulls a nicely framed DF for certain state and scenario
def request(series_id, api_key, yrs):
FRED_URL = f"https://api.stlouisfed.org/fred/series/observations?series_id={series_id}&api_key={api_key}&file_type=json"
FRED_request = requests.get(FRED_URL)
FRED_data = json.loads(FRED_request.text)
data = FRED_data["observations"]
df = pd.DataFrame(data)
df.date=pd.to_datetime(df.date)
cutoff_dt = df.date.max() -
|
pd.DateOffset(years=yrs)
|
pandas.DateOffset
|
import datetime
import random
import re
from typing import List, Text
import pandas as pd
import numpy as np
from tqdm import tqdm
from transformers import AutoTokenizer
''' '''
class PreprocessFirewall(object):
def __init__(self, logs: List[Text]) -> None:
self.logs = logs
@staticmethod
def _cleanTimelineMessage(l: Text):
l = re.sub(r'^.*?%ASA-\w+-\d-\d+:', '', l)
# OR
l = re.sub(r'^.*?%ASA--\d-\d+:', '', l)
# OR
#l = re.sub(r'^.*?%ASA--\d-\d+:', '', l)
# %ASA-bridge-6-1100
# there are also messages that start with %ASA--4-733: or
# %ASA-session-\d-\d+:: ; those are omitted manually
return l
@staticmethod
def _cleanParanthesis(l: Text):
'''for cleaning extra IP information '''
l = re.sub(r'\(([^()]*)\)', '', l)
return l
@staticmethod
def _info_bracket_fix(l: Text):
''' clean bracket around information '''
xxx_match = [xxx.group() for xxx in re.finditer(r"(\[)[a-z ]+(\])",l)]
xxx_bound = [xxx.groups() for xxx in re.finditer(r"(\[)[a-z ]+(\])",l)]
xxx_out = l
if len(xxx_match)>0:
for xxxx in xxx_bound[0]:
xxx_out = xxx_out.replace(xxxx,"")
return xxx_out
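# Illustrative example: "deny [tcp flood] detected" -> "deny tcp flood detected";
# only the first '[' and ']' around a lower-case span are stripped, the text
# between them is kept.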
@staticmethod
def _clean_HEX(l: Text):
xxx = re.sub(r"((?<=[^A-Za-z0-9])|^)(0x[a-f0-9A-F]+)((?=[^A-Za-z0-9])|$)","",l)
xxx = re.sub("\[, \]","",xxx)
return xxx
@staticmethod
def _augment_some_special_chars(l: Text):
xx = re.sub("\B_\B"," ",l)
xx = re.sub("->","to",xx)
return xx
@staticmethod
def _cleanlefovers(l: Text):
'''for cleaning leftover brackets and commas (e.g. around hexadecimal numbers) '''
l = re.sub(r'[\[\],]', '', l)
return l
@staticmethod
def _fix_missing_IP(l: Text):
''' if the last octet is 'x', assign a random number to it '''
# ^ start of the line
# $ end of the line
REGEX_parts = r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z])"
R = re.compile(REGEX_parts, re.S)
set_x = random.randrange(1, 256, 1)
l_edited = R.sub(lambda m: m.group().replace('.x', '.' + str(set_x), 1), l)
return l_edited
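# Illustrative example: "192.168.1.x" -> "192.168.1.<n>" where <n> is a single
# random value from 1-255 chosen once per call and reused for every match in
# the line.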
@staticmethod
def _fix_range_IP(l: Text):
''' if the last octet is a range like 20-250, assign a random number within it '''
# ^ start of the line
# $ end of the line
REGEX_parts = r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.((\d|\d\d+|1\d\d+|2[0-4]\d)\-(1\d\d*|2[0-4]\d|250))"
R = re.compile(REGEX_parts, re.S)
l_edited = R.sub(lambda m: m.group().replace(
m[4], str(random.randrange(int(m[5]), int(m[6]), 1)), 1), l)
return l_edited
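# Illustrative example: "10.0.0.20-250" -> "10.0.0.<n>" with <n> drawn from
# [20, 250) via random.randrange, i.e. the range is collapsed to one concrete
# octet.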
@staticmethod
def _fix_emptystrings(l: Text):
''' strips leading/trailing whitespace and collapses repeated whitespace '''
xx = re.sub(r"^[ \t\r\n]+|[ \t\r\n]+$","",l)
xx = re.sub(r"\s{2,}"," ",xx)
return xx
@staticmethod
def _getTime(l: Text):
''' extracts time from a log line (used by save below) '''
found = re.findall(
"((00|[0-9]|1[0-9]|2[0-3]):([0-9]*|[0-5][0-9]):([0-9]*|[0-5][0-9]))", l.strip())
foundTime = datetime.time(
int(found[0][1]), int(found[0][2]), int(found[0][3]))
return foundTime
def clean(self, l: Text):
# line edited - le
le = self._cleanTimelineMessage(l)
le = self._cleanParanthesis(le)
le = le.lower()
le = self._clean_HEX(le)
le = self._info_bracket_fix(le)
le = self._augment_some_special_chars(le)
le = self._fix_missing_IP(le)
le = self._fix_range_IP(le)
le = self._fix_emptystrings(le)
return le
def save(self):
''' save log file as csv '''
try:
import pandas as pd
from tqdm import tqdm
except ImportError:
raise
print('Saving the file...')
df = {'time': [], 'log': []}
# For every sentence...
tn = self.__len__() # total number of logs
print(f'Total number of logs: {tn}')
stride = 1 # a step for tqdm
# the output should stay openable in a file viewer, so we cap each CSV part at 1M rows
LIMIT = 1000000
startTime = datetime.datetime.now().strftime("%H_%M_%S")
parts = int(tn/LIMIT)
residual = tn % LIMIT
parts = parts+1 if residual > 0 else parts
for part in range(0, parts, stride):
df = {'time': [], 'log': []}
start = part*LIMIT
end = start+LIMIT if tn-start > LIMIT else start+residual
print(f'Part-{part+1} working...')
for i in tqdm(range(start, end, stride)):
line = self.logs[i]
# if there is a match
foundTime = self._getTime(line)
# line edited - le
le = self.clean(line)
df['log'].append(le)
df['time'].append(foundTime)
df = pd.DataFrame(data=df, columns=['time', 'log'])
df.to_csv("data/firewall/anomaly-log-part-" + str(part+1) + "-" +
startTime+".csv", index=False, header=True)
del(df)
def __getitem__(self, idx):
#time = self._getTime(self.logs[idx])
text = self.clean(self.logs[idx])
#return (time, text)
return text
def __len__(self):
return len(self.logs)
def main():
#bids_logs = PreprocessFirewall(out['log'].tolist())
# DAY1
p1 = pd.read_csv("data/firewall/anomaly/day1-labeled-part1.csv", sep=',')
p2 = pd.read_csv("data/firewall/anomaly/day1-labeled-part2.csv", sep=',')
p3 = pd.read_csv("data/firewall/anomaly/day1-labeled-part3.csv", sep=',')
p4 = pd.read_csv("data/firewall/anomaly/day1-labeled-part4.csv", sep=',')
p5 = pd.read_csv("data/firewall/anomaly/day1-labeled-part5.csv", sep=',')
p6 = pd.read_csv("data/firewall/anomaly/day1-labeled-part6.csv", sep=',')
p7 = pd.read_csv("data/firewall/anomaly/day1-labeled-part7.csv", sep=',')
p8 = pd.read_csv("data/firewall/anomaly/day1-labeled-part8.csv", sep=',')
p9 = pd.read_csv("data/firewall/anomaly/day1-labeled-part9.csv", sep=',')
p10 = pd.read_csv("data/firewall/anomaly/day1-labeled-part10.csv", sep=',')
p11 = pd.read_csv("data/firewall/anomaly/day1-labeled-part11.csv", sep=',')
p12 = pd.read_csv("data/firewall/anomaly/day1-labeled-part12.csv", sep=',')
whole_day1 = [p1, p2, p3, p4, p5, p6, p7, p8, p9,p10,p11,p12]
df_day1 = pd.concat(whole_day1)
#df_day1.info()
#df_day1['atype'] = np.where(df_day1.label == 1, 'Collective','-')
df_day1['type'] = np.where(df_day1.label == 1, 'DDOS','NORMAL')
# DAY 2
p1 = pd.read_csv("data/firewall/anomaly/day2-labeled-part1.csv", sep=',')
p2 = pd.read_csv("data/firewall/anomaly/day2-labeled-part2.csv", sep=',')
p3 =
|
pd.read_csv("data/firewall/anomaly/day2-labeled-part3.csv", sep=',')
|
pandas.read_csv
|
import os
from ast import literal_eval
from datetime import datetime
import numpy as np
import pandas as pd
from lob_data_utils import roc_results
from scipy.stats import norm
def gdf_representation(buy_orders, sell_orders, gdf):
buy_price, buy_volume = buy_orders
sell_price, sell_volume = sell_orders
buy_gdf_y = gdf[0] * norm.pdf(buy_price, loc=gdf[1], scale=gdf[2])
sell_gdf_y = gdf[0] * norm.pdf(sell_price, loc=gdf[1], scale=gdf[2])
return np.clip(buy_volume, 0.0, buy_gdf_y).sum() + np.clip(sell_volume, 0.0, sell_gdf_y).sum()
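# Note: gdf is assumed to be a 3-vector (weight, mean, scale); each side's
# volumes are capped by the scaled Gaussian evaluated at the corresponding
# price levels, and the capped buy and sell volumes are summed into one
# scalar feature.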
def transform_to_orders(df: pd.DataFrame, gdfs, K) -> pd.DataFrame:
order_list = []
df.index = df['Unnamed: 0']
df.index = pd.to_datetime(df.index)
rng = pd.date_range(min(df.index), max(df.index), freq='d')
# df = df.loc[str(rng[1].date()):]
for idx, row in df.iterrows():
try:
d_bid = np.array([literal_eval(row.get('bid_norm'))][0])
d_ask = np.array([literal_eval(row.get('ask_norm'))][0])
d_bid_prices = d_bid[:, 0]
d_ask_prices = d_ask[:, 0]
d_bid_volumes = d_bid[:, 1]
d_ask_volumes = d_ask[:, 1]
except Exception as e:
print(e)
raise e
new_row_dict = {}
for i in range(0, K):
gdf_repr = gdf_representation((d_bid_prices, d_bid_volumes),
(d_ask_prices, d_ask_volumes),
gdfs[i, :])
new_row_dict['gdf_' + str(i)] = gdf_repr
new_row_dict['mid_price'] = row.get('mid_price')
new_row_dict['mid_price_indicator'] = row.get('mid_price_indicator')
new_row_dict['datetime'] = row.get('datetime')
new_row_dict['queue_imbalance'] = row.get('queue_imbalance')
order_list.append(new_row_dict)
order_df =
|
pd.DataFrame(order_list)
|
pandas.DataFrame
|
import inspect
from copy import deepcopy
import numpy as np
import pandas as pd
from aistac.handlers.abstract_handlers import HandlerFactory
from aistac.intent.abstract_intent import AbstractIntentModel
from ds_discovery.components.commons import Commons
from ds_discovery.managers.models_property_manager import ModelsPropertyManager
__author__ = '<NAME>'
class ModelsIntentModel(AbstractIntentModel):
TRAIN_INTENT_LEVEL = 'train_level'
PREDICT_INTENT_LEVEL = 'predict_level'
def __init__(self, property_manager: ModelsPropertyManager, default_save_intent: bool=None,
default_intent_level: bool=None, order_next_available: bool=None, default_replace_intent: bool=None):
"""initialisation of the Intent class.
:param property_manager: the property manager class that references the intent contract.
:param default_save_intent: (optional) The default action for saving intent in the property manager
:param default_intent_level: (optional) the default level intent should be saved at
:param order_next_available: (optional) if the default behaviour for the order should be next available order
:param default_replace_intent: (optional) the default replace existing intent behaviour
"""
default_save_intent = default_save_intent if isinstance(default_save_intent, bool) else True
default_replace_intent = default_replace_intent if isinstance(default_replace_intent, bool) else True
default_intent_level = default_intent_level if isinstance(default_intent_level, (str, int, float)) else 'A'
default_intent_order = -1 if isinstance(order_next_available, bool) and order_next_available else 0
intent_param_exclude = ['canonical']
intent_type_additions = [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64]
super().__init__(property_manager=property_manager, default_save_intent=default_save_intent,
intent_param_exclude=intent_param_exclude, default_intent_level=default_intent_level,
default_intent_order=default_intent_order, default_replace_intent=default_replace_intent,
intent_type_additions=intent_type_additions)
def run_intent_pipeline(self, canonical: pd.DataFrame, intent_levels: [int, str, list]=None, **kwargs):
""" Collectively runs all parameterised intent taken from the property manager against the code base as
defined by the intent_contract.
It is expected that all intent methods have the 'canonical' as the first parameter of the method signature
and will contain 'inplace' and 'save_intent' as parameters.
:param canonical: this is the iterative value all intent are applied to and returned.
:param intent_levels: (optional) an single or list of levels to run, if list, run in order given
:param kwargs: additional kwargs to add to the parameterised intent, these will replace any that already exist
:return Canonical with parameterised intent applied or None if inplace is True
"""
# test if there is any intent to run
return
def register_estimator(self, canonical: pd.DataFrame, target: str, headers: list, class_name: str,
module_name: str, hyper_param: dict=None, test_size: float=None, random_state: int=None,
save_intent: bool=None, model_name: str=None, intent_order: int=None,
replace_intent: bool=None, remove_duplicates: bool=None):
""" registers and fits an estimator model returning the model fit
:param canonical: the model canonical
:param class_name: the name of the model class
:param target: the model target
:param headers: the model features header names
:param hyper_param: (optional) hyper parameters for the model instance
:param test_size: (optional) the size of the test sample (default tp 0.33)
:param random_state: (optional) a random state value for the test sample
:param module_name: (optional) the name of the module
:param save_intent: (optional) if the intent contract should be saved to the property manager
:param model_name: (optional) the name of the model
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:return: CatBoostClassifier.
"""
# resolve intent persist options
_method = inspect.currentframe().f_code.co_name
self._set_intend_signature(self._intent_builder(method=_method, params=locals()),
model_name=model_name, intent_order=intent_order, replace_intent=replace_intent,
remove_duplicates=remove_duplicates, save_intent=save_intent)
# Code block for intent
local_intent = {}
if model_name and self._pm.has_intent(model_name):
local_intent = self._pm.get_intent(level=model_name, intent=_method)
module_name = module_name if isinstance(module_name, str) else local_intent.get('module_name', None)
X = Commons.filter_columns(canonical, headers=headers)
y = Commons.filter_columns(canonical, headers=target)
module = HandlerFactory.get_module(module_name='ds_behavioral')
"""
PRIVATE METHODS SECTION
"""
def _intent_builder(self, method: str, params: dict, exclude: list=None) -> dict:
"""builds the intent_params. Pass the method name and local() parameters
Example:
self._intent_builder(inspect.currentframe().f_code.co_name, **locals())
:param method: the name of the method (intent). can use 'inspect.currentframe().f_code.co_name'
:param params: the parameters passed to the method. use `locals()` in the caller method
:param exclude: (optional) convenience parameter identifying param keys to exclude.
:return: dict of the intent
"""
if not isinstance(params.get('canonical', None), (str, dict)):
exclude = ['canonical']
return super()._intent_builder(method=method, params=params, exclude=exclude)
def _set_intend_signature(self, intent_params: dict, model_name: [int, str]=None, intent_order: int=None,
replace_intent: bool=None, remove_duplicates: bool=None, save_intent: bool=None):
""" sets the intent section in the configuration file. Note: by default any identical intent, e.g.
intent with the same intent (name) and the same parameter values, are removed from any level.
:param intent_params: a dictionary type set of configuration representing a intent section contract
:param model_name: (optional) the model name that groups intent by a reference name
:param intent_order: (optional) the order in which each intent should run.
If None: default's to -1
if -1: added to a level above any current instance of the intent section, level 0 if not found
if int: added to the level specified, overwriting any that already exist
:param replace_intent: (optional) if the intent method exists at the level, or default level
True - replaces the current intent method with the new
False - leaves it untouched, disregarding the new intent
:param remove_duplicates: (optional) removes any duplicate intent in any level that is identical
:param save_intent (optional) if the intent contract should be saved to the property manager
"""
if save_intent or (not isinstance(save_intent, bool) and self._default_save_intent):
if not isinstance(model_name, (str, int)) or not model_name:
raise ValueError(f"if the intent is to be saved then a feature name must be provided")
super()._set_intend_signature(intent_params=intent_params, intent_level=model_name, intent_order=intent_order,
replace_intent=replace_intent, remove_duplicates=remove_duplicates,
save_intent=save_intent)
return
def _get_canonical(self, data: [pd.DataFrame, pd.Series, list, str, dict], header: str=None) -> pd.DataFrame:
if isinstance(data, pd.DataFrame):
return deepcopy(data)
if isinstance(data, dict):
method = data.pop('method', None)
if method is None:
raise ValueError(f"The data dictionary has no 'method' key.")
if str(method).startswith('@generate'):
task_name = data.pop('task_name', None)
if task_name is None:
raise ValueError(f"The data method '@generate' requires a 'task_name' key.")
repo_uri = data.pop('repo_uri', None)
module = HandlerFactory.get_module(module_name='ds_behavioral')
inst = module.SyntheticBuilder.from_env(task_name=task_name, uri_pm_repo=repo_uri, default_save=False)
size = data.pop('size', None)
seed = data.get('seed', None)
run_book = data.pop('run_book', None)
result = inst.tools.run_intent_pipeline(size=size, columns=run_book, seed=seed)
return inst.tools.frame_selection(canonical=result, save_intent=False, **data)
else:
raise ValueError(f"The data 'method' key {method} is not a recognised intent method")
elif isinstance(data, (list, pd.Series)):
header = header if isinstance(header, str) else 'default'
return pd.DataFrame(data=deepcopy(data), columns=[header])
elif isinstance(data, str):
if data == '@empty':
return pd.DataFrame()
if not self._pm.has_connector(connector_name=data):
raise ValueError(f"The data connector name '{data}' is not in the connectors catalog")
handler = self._pm.get_connector_handler(data)
canonical = handler.load_canonical()
if isinstance(canonical, dict):
canonical =
|
pd.DataFrame.from_dict(data=canonical, orient='columns')
|
pandas.DataFrame.from_dict
|
"""
Experiment for XGBoost + RI
Aim: To find the best m and ntree(num_round)
m: [100, 120, 140, 160]
ntree: [140, 160, 180, 200, 220, 240, 260]
Averaging 20 models
Summary (m is the row index; '*' marks the lowest loss):
    m    loss      ntree
    100  0.450670  240
    120  0.450491  220
    140  0.449575  220
    160  0.449249  220 *
Time: 2:56:52 on i7-4790k 32G MEM GTX660
I got a different result before I reinstalled ubuntu 14.04 LTS.
    m    loss      ntree
    100  0.450663  240
    120  0.449751  220
    140  0.448961  220 *
    160  0.449046  220
So I chose m=140, ntree=220.
"""
import numpy as np
import scipy as sp
import pandas as pd
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import log_loss
from datetime import datetime
import os
import xgboost as xgb
from utility import *
path = os.getcwd() + '/'
path_log = path + 'logs/'
file_train = path + 'train.csv'
training = pd.read_csv(file_train, index_col = 0)
num_train = training.shape[0]
y = training['target'].values
yMat =
|
pd.get_dummies(training['target'])
|
pandas.get_dummies
|
import pandas as pd
from os.path import join
import numpy as np
data_path = r'/data/'
# add neighbors features, predecessor and successor
def featured_neighbors():
# propagate list forth or back by filling NaNs with predecessor value
def fill(s, rev=False):
if rev: s = s[::-1]
filler = 0
for i,v in enumerate(s):
s[i] = filler = filler if np.isnan(v) else v
return s[::-1] if rev else s
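# Illustrative example: fill([nan, 1.0, nan, 2.0]) -> [0, 1.0, 1.0, 2.0];
# with rev=True the values are filled from the other end, i.e. NaNs take the
# value of their successor instead.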
t = logFeature.reset_index().join(train, on='id').join(test, on='id',
lsuffix='_train', rsuffix='_test').drop_duplicates('id')
t['x1'] = fill(t['fault_severity'].shift(1).values)
t['x2'] = fill(t['fault_severity'].shift(-1).values, rev=True)
# 'position' - post competition addition
t['location'] = t[['location_train', 'location_test']].fillna(0).astype(int).sum(axis=1)
groups = t.groupby('location')
t['position'] = groups.cumcount() / groups['id'].transform(len)
return t[['id', 'x1', 'x2', 'position']].set_index('id')
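# Hedged toy illustration (not competition data) of the 'position' feature built above:
# groupby.cumcount() numbers the rows within each location, and dividing by
# groupby.transform(len) turns that into a relative position in [0, 1).
def _position_feature_example():
    toy = pd.DataFrame({'id': [10, 11, 12, 13, 14], 'location': [1, 1, 1, 2, 2]})
    g = toy.groupby('location')
    toy['position'] = g.cumcount() / g['id'].transform(len)
    return toy  # positions: 0, 1/3, 2/3 for location 1 and 0, 1/2 for location 2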
#combine all tables
resourceType = pd.read_csv(join(data_path, 'resource_type.csv'), index_col=0)
resourceTypeVectorized = pd.get_dummies(resourceType).groupby(resourceType.index).sum().astype(int)
resourceTypeVectorized['resource_type_count'] = pd.get_dummies(resourceType)  # API: pandas.get_dummies
# -*- coding: utf-8 -*-
from datetime import timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range, timedelta_range
)
from pandas.errors import NullFrequencyError
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(params=['B', 'D'])
def freq(request):
return request.param
class TestTimedeltaIndexArithmetic(object):
# Addition and Subtraction Operations
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_tdi_shift_int(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None)
with pytest.raises(NullFrequencyError):
tdi.shift(2)
# -------------------------------------------------------------
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and integer
def test_tdi_add_int(self, one):
# Variants of `one` for #19012
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + one
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_iadd_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
rng += one
tm.assert_index_equal(rng, expected)
def test_tdi_sub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - one
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_isub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_add_integer_array(self, box):
# GH#19959
rng = timedelta_range('1 days 09:00:00', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['1 day 13:00:00'] * 3)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_sub_integer_array(self, box):
# GH#19959
rng = timedelta_range('9H', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['5H', '7H', '9H'])
result = rng - other
tm.assert_index_equal(result, expected)
result = other - rng
tm.assert_index_equal(result, -expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_addsub_integer_array_no_freq(self, box):
# GH#19959
tdi = TimedeltaIndex(['1 Day', 'NaT', '3 Hours'])
other = box([14, -1, 16])
with pytest.raises(NullFrequencyError):
tdi + other
with pytest.raises(NullFrequencyError):
other + tdi
with pytest.raises(NullFrequencyError):
tdi - other
with pytest.raises(NullFrequencyError):
other - tdi
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
# Note: add and sub are tested in tests.test_arithmetic
def test_tdi_iadd_timedeltalike(self, delta):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng += delta
tm.assert_index_equal(rng, expected)
def test_tdi_isub_timedeltalike(self, delta):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
pytest.raises(TypeError, lambda: tdi - dt)
pytest.raises(TypeError, lambda: tdi - dti)
pytest.raises(TypeError, lambda: td - dt)
pytest.raises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
        td = Timedelta('1 days')  # API: pandas.Timedelta
# -*- coding: utf-8 -*-
from __future__ import print_function
import nose
from numpy import nan
from pandas import Timestamp
from pandas.core.index import MultiIndex
from pandas.core.api import DataFrame
from pandas.core.series import Series
from pandas.util.testing import (assert_frame_equal, assert_series_equal
)
from pandas.compat import (lmap)
from pandas import compat
import pandas.core.common as com
import numpy as np
import pandas.util.testing as tm
import pandas as pd
class TestGroupByFilter(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_filter_series(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(s.index))
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(s.index))
def test_filter_single_column_df(self):
df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])
grouper = df[0].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(df.index))
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(df.index))
def test_filter_multi_column_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10),
expected)
def test_filter_mixed_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 10), expected)
def test_filter_out_all_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 1000), df.loc[[]])
def test_filter_out_no_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
filtered = grouped.filter(lambda x: x.mean() > 0)
assert_series_equal(filtered, s)
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
filtered = grouped.filter(lambda x: x['A'].mean() > 0)
assert_frame_equal(filtered, df)
def test_filter_out_all_groups_in_df(self):
# GH12768
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=False)
expected = pd.DataFrame({'a': [nan] * 3, 'b': [nan] * 3})
assert_frame_equal(expected, res)
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=True)
expected = pd.DataFrame({'a': [], 'b': []}, dtype="int64")
assert_frame_equal(expected, res)
def test_filter_condition_raises(self):
def raise_if_sum_is_zero(x):
if x.sum() == 0:
raise ValueError
else:
return x.sum() > 0
s = pd.Series([-1, 0, 1, 2])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
self.assertRaises(TypeError,
lambda: grouped.filter(raise_if_sum_is_zero))
def test_filter_with_axis_in_groupby(self):
# issue 11041
index = pd.MultiIndex.from_product([range(10), [0, 1]])
data = pd.DataFrame(
np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
result = data.groupby(level=0,
axis=1).filter(lambda x: x.iloc[0, 0] > 10)
expected = data.iloc[:, 12:20]
assert_frame_equal(result, expected)
def test_filter_bad_shapes(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby('B')
g_s = s.groupby(s)
f = lambda x: x
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: x == 1
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: np.outer(x, x)
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
def test_filter_nan_is_false(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby(df['B'])
g_s = s.groupby(s)
f = lambda x: np.nan
assert_frame_equal(g_df.filter(f), df.loc[[]])
assert_series_equal(g_s.filter(f), s[[]])
def test_filter_against_workaround(self):
np.random.seed(0)
# Series of ints
s = Series(np.random.randint(0, 100, 1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Series of floats
s = 100 * Series(np.random.random(1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Set up DataFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),
'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
# Group by ints; filter on floats.
grouped = df.groupby('ints')
old_way = df[grouped.floats.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['floats'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
# Group by floats (rounded); filter on strings.
grouper = df.floats.apply(lambda x: np.round(x, -1))
grouped = df.groupby(grouper)
old_way = df[grouped.letters.
transform(lambda x: len(x) < N / 10).astype('bool')]
new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = df.groupby('letters')
old_way = df[grouped.ints.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['ints'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
def test_filter_using_len(self):
# BUG GH4447
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
grouped = df.groupby('B')
actual = grouped.filter(lambda x: len(x) > 2)
expected = DataFrame(
{'A': np.arange(2, 6),
'B': list('bbbb'),
'C': np.arange(2, 6)}, index=np.arange(2, 6))
assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = df.loc[[]]
assert_frame_equal(actual, expected)
# Series have always worked properly, but we'll test anyway.
s = df['B']
grouped = s.groupby(s)
actual = grouped.filter(lambda x: len(x) > 2)
expected = Series(4 * ['b'], index=np.arange(2, 6), name='B')
assert_series_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = s[[]]
assert_series_equal(actual, expected)
def test_filter_maintains_ordering(self):
# Simple case: index is sequential. #4621
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
        assert_frame_equal(actual, expected)  # API: pandas.util.testing.assert_frame_equal
#!/usr/bin/python
# encoding: utf-8
"""
@author: xuk1
@license: (C) Copyright 2013-2017
@contact: <EMAIL>
@file: bb_parse.py
@time: 8/28/2017 11:03
@desc:
"""
import os
import re
import sys
from collections import OrderedDict
import numpy as np
import pandas as pd
class BBParse:
"""
Parse TPCx-BB log from BigBenchTimes.csv
"""
phase_name = ['BENCHMARK', 'LOAD_TEST', 'POWER_TEST', 'THROUGHPUT_TEST_1',
'VALIDATE_POWER_TEST', 'VALIDATE_THROUGHPUT_TEST_1']
def __init__(self, bb_log_path):
"""
Constructor for parse TPCx-BB log
:param bb_log_path: TPCx-BB log path, dir 'run-logs' is not included
"""
self.bb_log_path = bb_log_path
pass
def get_stamp_by_phase(self, phase, *stream_query_num):
"""
get start and end timestamp of each phase or each query from TPCx-BB logs
:param phase: phase names includes: ['BENCHMARK', 'LOAD_TEST', 'POWER_TEST',
'THROUGHPUT_TEST_1', 'VALIDATE_POWER_TEST', 'VALIDATE_THROUGHPUT_TEST_1']
:param stream_query_num: optional, maximum two inputs are allowed,
one is for the stream number the other is for the query number
:return: start and end timestamp
"""
if phase in self.phase_name:
csv_path = self.bb_log_path + os.sep + 'run-logs' + os.sep + 'BigBenchTimes.csv'
if not os.path.isfile(csv_path):
                print('BigBenchTimes.csv does not exist in {0}, exiting...'.format(self.bb_log_path))
exit(-1)
            df = pd.read_csv(csv_path, delimiter=';')  # API: pandas.read_csv
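# Hedged usage sketch for BBParse above (the path and the returned unpacking are
# illustrative assumptions; the body of get_stamp_by_phase is truncated here):
#
#     bb = BBParse('/path/to/tpcx-bb/logs')
#     power_start, power_end = bb.get_stamp_by_phase('POWER_TEST')
#     q05_start, q05_end = bb.get_stamp_by_phase('THROUGHPUT_TEST_1', 0, 5)  # stream 0, query 5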
from itertools import takewhile
from os import listdir
from os.path import join
import networkx as nx
import numpy as np
import pandas as pd
from tqdm import tqdm
def parse_participants(data_path):
participants_path = join(data_path, "participants.csv")
# Get the dictionary of parameters
param_dict = {}
with open(participants_path, "r") as fobj:
header_iter = takewhile(lambda s: s.startswith("#"), fobj)
for item in header_iter:
parameter, value = item[1:].split(":")
parameter = parameter.strip()
value = value.strip()
param_dict[parameter] = value
# Read the participants data
participants = pd.read_csv(participants_path, comment="#")
return participants, param_dict
def load_edgelists(graph_path, participants, sample_size, n_vertices, extension):
# Make an empty tensor to hold the graphs
graphs = np.zeros(shape=(sample_size, n_vertices, n_vertices))
# Iterate over graphs and store them in `participants`
for fl in tqdm(listdir(graph_path), "Loading graphs"):
if not fl.endswith(extension):
continue
subid = fl.split("_")[0]
idx = participants.index[participants["participant_id"] == subid]
with open(join(graph_path, fl), "rb") as edgelist:
G = nx.read_edgelist(edgelist)
adj = nx.to_numpy_array(G, nodelist=sorted(G.nodes), dtype=np.float)
graphs[idx[0]] = adj
participants.at[idx, "slice"] = idx[0]
return graphs
def load_metrics(metrics_path, extension=".csv"):
holder = []
for fl in tqdm(listdir(metrics_path), "Loading metrics"):
if not fl.endswith(extension):
continue
subid = fl.split(".")[0]
df = pd.read_csv(join(metrics_path, fl), skiprows=2)
df["participant_id"] = subid
holder.append(df)
    metrics = pd.concat(holder, axis=0)  # API: pandas.concat
"""
Submodule for working with compositional data.
"""
import pandas as pd
from . import codata
from ..util.log import Handle
logger = Handle(__name__)
def attribute_transform(f, *args, **kwargs):
"""
Decorator to add transform function as a dataframe attribute after
transformation, for traceability.
Parameters
-----------
f : :class:`func` | :class:`class`
Transform function.
Returns
-------
:class:`func` | :class:`class`
Object with modified docstring.
"""
def wrapper(*args, **kwargs):
output = f(*args, **kwargs)
output.attrs["transform"] = f.__name__
return output
return wrapper
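# Hedged usage sketch of the decorator above: the wrapped function's name is recorded
# in the returned DataFrame's ``attrs`` metadata for traceability. The transform body
# below is a placeholder, not part of pyrolite's API.
def _attribute_transform_example():
    @attribute_transform
    def demean(df):
        return df - df.mean()

    out = demean(pd.DataFrame({"SiO2": [50.0, 55.0], "MgO": [8.0, 6.0]}))
    assert out.attrs["transform"] == "demean"
    return out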
# note that only some of these methods will be valid for series
@pd.api.extensions.register_series_accessor("pyrocomp")  # API: pandas.api.extensions.register_series_accessor
import os
import pandas as pd
import numpy as np
import nips15
import logging
from scipy.cluster import hierarchy
from scipy.spatial.distance import pdist
folds_dir = 'models/jmlr/folds'
demographic = ['female', 'afram']
molecular = ['aca', 'scl']
pfvc_spec = {'t':'years_seen_full', 'y':'pfvc', 'x1':demographic, 'x2':demographic + molecular}
pfvc = pd.read_csv('data/benchmark_pfvc.csv')
pfvc_pd = [nips15.PatientData.from_tbl(tbl, **pfvc_spec) for _, tbl in pfvc.groupby('ptid')]
tss_spec = {'t':'years_seen', 'y':'tss', 'x1':demographic, 'x2':demographic + molecular}
tss = pd.read_csv('data/benchmark_tss.csv')  # API: pandas.read_csv
import os
import numpy as np
from openpyxl import load_workbook
import pandas as pd
import pytest
from geochem_dataset.excel import Dataset
from geochem_dataset.excel.dataclasses import Survey
from geochem_dataset.excel.exceptions import IntegrityError
from helpers.utils import xlref, xlrowref, xlcolref
TEST_FILE_NAME = 'SURVEYS.xlsx'
TEST_SHEET_NAME = 'SURVEYS'
TEST_COLUMNS = ('TITLE', 'ORGANIZATION', 'YEAR_BEGIN', 'YEAR_END', 'PARTY_LEADER', 'DESCRIPTION', 'GSC_CATALOG_NUMBER')
TEST_DATA = [
('2011, Till sampling survey, Hall Peninsula. Canada-Nunavut Geoscience Office', 'Canada-Nunavut Geoscience Office', 2011, 2013, '<NAME>', 'A test description', 1000),
]
ERROR_MESSAGES = {
'missing_worksheet': 'Worksheet {worksheet} is missing from workbook {workbook}',
'missing_columns': 'Worksheet {workbook}::{worksheet} is missing columns: {column_names}',
'extra_columns': 'Worksheet {workbook}::{worksheet} has extra columns: {column_names}',
'too_few_rows': 'Worksheet {workbook}::{worksheet} has too few rows (min is {min_rows} and max is {max_rows})',
'unique_constraint_violation': 'Row {row} of worksheet {workbook}::{worksheet} violated a unique constraint on columns: {columns} (duplicate of row {other_row})',
}
class TestSurveys:
def test_surveys(self, dataset_path):
# Build expected rows
expected_surveys = [Survey(*args) for args in TEST_DATA]
# Assert
with Dataset(dataset_path) as dataset:
surveys = list(dataset.surveys)
assert surveys == expected_surveys
def test_surveys_with_empty_file(self, dataset_path):
# Modify surveys file
surveys_path = dataset_path / TEST_FILE_NAME
os.truncate(surveys_path, 0)
# Assert
with pytest.raises(ValueError) as excinfo:
with Dataset(dataset_path) as dataset:
pass
def test_surveys_with_missing_sheet(self, dataset_path):
# Modify surveys file
surveys_path = dataset_path / TEST_FILE_NAME
wb = load_workbook(surveys_path)
ws = wb[TEST_SHEET_NAME]
ws.title = "Skittles"
wb.save(surveys_path)
# Expected
expected_error_msg_kwargs = {
'workbook': TEST_FILE_NAME,
'worksheet': TEST_SHEET_NAME,
}
# Assert
with pytest.raises(IntegrityError) as excinfo:
with Dataset(dataset_path) as dataset:
pass
assert excinfo.value.args[0] == ERROR_MESSAGES['missing_worksheet'].format(**expected_error_msg_kwargs)
def test_surveys_with_missing_columns(self, dataset_path):
# Modify surveys file
surveys_path = dataset_path / TEST_FILE_NAME
        with pd.ExcelWriter(surveys_path) as writer:  # API: pandas.ExcelWriter
import finterstellar as fs
import pandas as pd
import numpy as np
import datetime as dt
class LoadData:
def read_investing_price(self, path, cd):
file_name = path + cd + ' Historical Data.csv'
df = pd.read_csv(file_name, index_col='Date')
return (df)
def create_portfolio_df(self, path, p_name, p_cd):
new_df = self.make_historical_price_df(path, p_cd)
prices_df = self.create_master_file(path, p_name, new_df)
prices_df = self.update_master_file(path, p_name, new_df)
return (prices_df)
def make_historical_price_df(self, path, s_cd):
cds = fs.str_list(s_cd)
dates = pd.Series()
for c in cds:
prices_df = self.read_investing_price(path, c)
prices_df = self.date_formatting(prices_df)
c = prices_df['Price']
dates_new = pd.Series(prices_df.index)
dates = dates.append(dates_new)
dates = dates.drop_duplicates().sort_values().reset_index()
dates = dates.drop(['index'], axis=1)
universe_df = pd.DataFrame(index=dates[0])
universe_df.index.name = 'Date'
for c in cds:
prices_df = self.read_investing_price(path, c)
prices_df = self.date_formatting(prices_df)
prices_df = self.price_df_trimming(prices_df, c)
universe_df[c] = prices_df[c]
universe_df
universe_df = universe_df.fillna(method='ffill')
return (universe_df)
def create_master_file(self, path, f_name, df):
file_name = path + 'fs ' + f_name + '.csv'
try:
f = open(file_name)
print('Updating master file')
f.close()
except IOError as e:
df.index = pd.to_datetime(df.index)
df.index.name = 'Date'
#df = df.fillna(method='ffill')
#today_date = pd.Timestamp.today().date().strftime('%y%m%d')
df.to_csv(file_name)
return (df)
def update_master_file(self, path, n, new_df):
try:
file_name = 'fs ' + n + '.csv'
master_df = self.read_master_file(path, n)
universe_df = new_df.combine_first(master_df)
universe_df.index.name = 'Date'
#universe_df = universe_df.fillna(method='ffill')
universe_df.to_csv(path + file_name)
except IOError as e:
print('Creating master file')
self.create_master_file(path, n, new_df)
universe_df = new_df
return (universe_df)
def read_master_file(self, path, n):
file_name = path + 'fs ' + n + '.csv'
prices_df = pd.read_csv(file_name, index_col='Date')
dates = []
for i in prices_df.index:
d = pd.to_datetime(i)
dates.append(d)
        prices_df['Date'] = dates  # replace the Date values
prices_df = prices_df.set_index('Date')
return (prices_df)
def get_codes(self, prices_df):
codes = prices_df.columns.values
return (codes)
def read_raw_csv(self, path, n):
file_name = path + n + '.csv'
df = pd.read_csv(file_name, index_col='Date')
dates = []
for i in df.index:
#d = dt.datetime.strptime(i, '%Y-%m-%d')
d = pd.to_datetime(i)
dates.append(d)
        df['Date'] = dates  # replace the Date values
df = df.set_index('Date')
df.sort_index(axis=0, inplace=True)
return (df)
def read_raw_excel(self, path, n, sheet=None):
file_name = path + n
df = pd.read_excel(file_name, index_col=0)
dates = []
for i in df.index:
d = pd.to_datetime(i)
dates.append(d)
        df['Date'] = dates  # replace the Date values
df = df.set_index('Date')
df.sort_index(axis=0, inplace=True)
return (df)
def date_formatting(self, df):
dates = []
for i in df.index:
#d = dt.datetime.strptime(df.iloc[i,0], '%b %d, %Y')
#d = pd.to_datetime(df.iloc[i,0])
d = pd.to_datetime(i)
dates.append(d)
        df['Date'] = dates  # replace the Date values
df = df.set_index('Date')
#df = df.sort_index()
return (df)
def price_formatting(self, df, c='Price'):
for i in df.index:
p = df.loc[i, c]
try:
p = p.replace(',', '')
except:
pass
df.loc[i, c] = float(p)
return (df)
def price_df_trimming(self, df, cd):
prices = []
for i in df.index:
p = df['Price'].loc[i]
try:
p = p.replace(',', '')
except:
pass
prices.append(float(p))
df[cd] = prices
df_new = pd.DataFrame(df[cd])
#df = df.drop(df.columns[1:], axis=1)
df_new = df_new.sort_index()
return (df_new)
def read_intraday_csv(self, path, n):
file_name = path + n + '.csv'
df = pd.read_csv(file_name, index_col=0)
time = []
for i in df.index:
d = pd.to_datetime(i).time()
time.append(d)
        df['Time'] = time  # replace the Date values
df = df.set_index('Time')
df.sort_index(axis=0, inplace=True)
return (df)
def read_intraday_excel(self, path, n):
file_name = path + n + '.xlsx'
        df = pd.read_excel(file_name, index_col=0)  # API: pandas.read_excel
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 5 13:08:01 2021
@author: <NAME>
"""
import scipy
import numpy as np
import pandas as pd
import statsmodels.api as sm
import scipy.stats as stats
#print stats.stats.spearmanr(x,y)
def factor_IC_test(factor_data, market_cap_data, stock_return):
"""
    :param factor_data: the residual of the regression of factor exposure(t) on market-cap(t) and
        industry factors(t) (dummies)
    :param market_cap_data: monthly market capitalisation(t)
    :param stock_return: monthly stock return (t+1)
    :return: correlations between factor exposure(t) and stock return(t+1) (a dataframe)
    tips: we use this residual as a proxy for factor exposure, which is both industry-adjusted and market-cap-adjusted;
    examine the mean (significance), std (stability), IR ratio (mean/std) and the proportion of correlations > 0 (direction)
"""
Ic=pd.DataFrame()
beta0=pd.DataFrame()
length=min(factor_data.shape[1],market_cap_data.shape[1])#74
for i in range(7,length):#2015-06
        y = np.array(factor_data.iloc[:,i])  # dependent variable: the i-th column of the factor data
        x = np.array(market_cap_data.iloc[:,i])  # independent variable: the i-th column of the market-cap data
        x = sm.add_constant(x)  # this step is required if the model includes an intercept
        model = sm.OLS(y, x).fit()  # build and fit the ordinary least squares model
a=model.resid
beta0[i-7]=a
# beta0=factor_data
length=min(beta0.shape[1],stock_return.shape[1])
for i in range(length):
#Ic.append(scipy.stats.pearsonr(beta0.iloc[:,i], stock_return.iloc[:,i]))
#Ic.append(stats.stats.spearmanr(beta0.iloc[:,i], stock_return.iloc[:,i]))
Ic[i]=stats.stats.spearmanr(beta0.iloc[:,i], stock_return.iloc[:,i])
residuals=Ic.iloc[0,:]
p_value=Ic.iloc[1,:]
print("%d residuals are:" % len(residuals))
#print(Ic.iloc[0,:])
print("the %d p_value of the residuals are:" % len(residuals))
# print(Ic.iloc[1,:])
print("the Percentage of positive residuals is:")
print(residuals[residuals>0].count()/len(residuals))
print("the stand devition of the residual are: ")
print(residuals.std())
print("the absulute mean of the residuals are: ")
residuals=residuals.abs()
print(residuals.mean())
print("the stand devition of the p_value are: ")
print(p_value.std())
print("the absulute mean of the p_value are: ")
p_value=p_value.abs()
print(p_value.mean())
return 0
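# Hedged helper (not part of the original script) computing the summary statistics the
# docstring above refers to, given a series of monthly ICs: mean (significance),
# std (stability), IR = mean/std, and the proportion of positive ICs (direction).
def ic_summary(ic_values):
    ic = pd.Series(ic_values).dropna()
    return {'mean': ic.mean(),
            'std': ic.std(),
            'IR': ic.mean() / ic.std(),
            'pct_positive': (ic > 0).mean()}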
if __name__ == '__main__':
path0="C:/Users/zhang dejian/Downloads/resource/703/project/CI/Stock_return2.csv"
path1="C:/Users/zhang dejian/Downloads/resource/703/project/CI/Market_Value.csv"
path2="C:/Users/zhang dejian/Downloads/resource/703/project/CI/EP.csv"
path3="C:/Users/zhang dejian/Downloads/resource/703/project/CI/BP.csv"
path4="C:/Users/zhang dejian/Downloads/resource/703/project/CI/ROA.csv"
path5="C:/Users/zhang dejian/Downloads/resource/703/project/CI/ROE.csv"
path6="C:/Users/zhang dejian/Downloads/resource/703/project/CI/CFP.csv"
path7="C:/Users/zhang dejian/Downloads/resource/703/project/CI/asset_to_liability.csv"
path8="C:/Users/zhang dejian/Downloads/resource/703/project/CI/CF_to_Liability.csv"
path9="C:/Users/zhang dejian/Downloads/resource/703/project/CI/debt_to_asset.csv"
path10="C:/Users/zhang dejian/Downloads/resource/703/project/CI/RSI-30.csv"
path11="C:/Users/zhang dejian/Downloads/resource/703/project/CI/Turnover.csv"
path12="C:/Users/zhang dejian/Downloads/resource/703/project/CI/cash_ratio.csv"
path13="C:/Users/zhang dejian/Downloads/resource/703/project/CI/Div_yeild.csv"
path14="C:/Users/zhang dejian/Downloads/resource/703/project/CI/EBITDA_EV.csv"
path15="C:/Users/zhang dejian/Downloads/resource/703/project/CI/volatility.csv"
stock_return=pd.read_csv(path0)
market_cap_data=pd.read_csv(path1)
EP=pd.read_csv(path2)
BP=pd.read_csv(path3)
ROA=pd.read_csv(path4)
ROE=pd.read_csv(path5)
CFP=pd.read_csv(path6)
asset_to_liability=pd.read_csv(path7)
    CF_to_Liability = pd.read_csv(path8)  # API: pandas.read_csv
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (Temp)
"""
    full_data = pd.read_csv(filename)  # API: pandas.read_csv
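# Hedged continuation sketch (the original function body is truncated above): one
# plausible preprocessing for the city-temperature exercise, assuming the CSV has
# 'Date' and 'Temp' columns. Column names and the -70 sentinel filter are assumptions.
def _load_data_sketch(filename: str) -> pd.DataFrame:
    df = pd.read_csv(filename, parse_dates=['Date']).dropna()
    df = df[df['Temp'] > -70]                   # drop clearly invalid sentinel temperatures
    df['DayOfYear'] = df['Date'].dt.dayofyear   # simple seasonal feature
    return df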
from collections import defaultdict
import logging
from multiprocessing import current_process, Manager, Pool, Process, Queue
import time
import warnings
import joblib
import matplotlib.pyplot as plt
import numpy as np
import numpyro
import pandas as pd
import timeout_decorator
from timeout_decorator import TimeoutError
from remade.fits import fit_Bayesian, fit_frequentist, fit_utils, io
from remade.rich import progress
logger = logging.getLogger(__name__)
numpyro.enable_x64()
#%%
timeout_first_fit = 5 * 60 # 5 minutes, very first fit
timeout_subsequent_fits = 60 # 1 minute
#%%
def get_groupby(df_mismatch):
return df_mismatch.groupby("tax_id", sort=False, observed=True)
def group_to_numpyro_data(cfg, group):
forward = "CT"
forward_ref = forward[0]
reverse = "GA"
reverse_ref = reverse[0]
z = np.array(group.iloc[:15]["position"], dtype=int)
k_forward = np.array(group.iloc[:15][forward], dtype=int)
N_forward = np.array(group.iloc[:15][forward_ref], dtype=int)
k_reverse = np.array(group.iloc[-15:][reverse], dtype=int)
N_reverse = np.array(group.iloc[-15:][reverse_ref], dtype=int)
data = {
"z": np.concatenate([z, -z]),
"k": np.concatenate([k_forward, k_reverse]),
"N": np.concatenate([N_forward, N_reverse]),
}
return data
#%%
def add_count_information(fit_result, group, data):
fit_result["N_z1_forward"] = data["N"][0]
fit_result["N_z1_reverse"] = data["N"][15]
fit_result["N_sum_total"] = data["N"].sum()
fit_result["N_sum_forward"] = data["N"][:15].sum()
fit_result["N_sum_reverse"] = data["N"][15:].sum()
fit_result["N_min"] = data["N"].min()
fit_result["k_sum_total"] = data["k"].sum()
fit_result["k_sum_forward"] = data["k"][:15].sum()
fit_result["k_sum_reverse"] = data["k"][15:].sum()
#%%
def fit_single_group_without_timeout(
cfg,
group,
mcmc_PMD=None,
mcmc_null=None,
):
fit_result = {}
data = group_to_numpyro_data(cfg, group)
# add_tax_information(fit_result, group)
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
f, f_forward, f_reverse = fit_frequentist.make_fits(fit_result, data)
add_count_information(fit_result, group, data)
if mcmc_PMD is not None and mcmc_null is not None:
fit_Bayesian.make_fits(fit_result, data, mcmc_PMD, mcmc_null)
return fit_result
def get_fit_single_group_with_timeout(timeout=60):
"""timeout in seconds"""
return timeout_decorator.timeout(timeout)(fit_single_group_without_timeout)
def compute_fits_seriel(cfg, df_mismatch):
    # initializes no MCMC objects if cfg.bayesian is False
mcmc_PMD, mcmc_null = fit_Bayesian.init_mcmcs(cfg)
groupby = get_groupby(df_mismatch)
d_fit_results = {}
fit_single_group_first_fit = get_fit_single_group_with_timeout(timeout_first_fit)
fit_single_group_subsequent_fits = get_fit_single_group_with_timeout(
timeout_subsequent_fits
)
fit_single_group = fit_single_group_first_fit
logger.info(f"Fit: Initializing fit in seriel.")
task_fit = progress.add_task(
"task_status_fitting",
progress_type="known_total",
status="Fitting",
name="Fits: ",
total=len(groupby),
)
for tax_id, group in groupby:
# break
try:
fit_result = fit_single_group(cfg, group, mcmc_PMD, mcmc_null)
d_fit_results[tax_id] = fit_result
except TimeoutError:
logger.warning(f"Fit: Timeout at tax_id {tax_id}. Skipping for now")
progress.advance(task_fit)
fit_single_group = fit_single_group_subsequent_fits
return d_fit_results
def worker(cfg, queue_in, queue_out):
    # initializes no MCMC objects if cfg.bayesian is False
mcmc_PMD, mcmc_null = fit_Bayesian.init_mcmcs(cfg)
fit_single_group_first_fit = get_fit_single_group_with_timeout(timeout_first_fit)
fit_single_group_subsequent_fits = get_fit_single_group_with_timeout(
timeout_subsequent_fits
)
# first run is patient
fit_single_group = fit_single_group_first_fit
while True:
# block=True means make a blocking call to wait for items in queue
tax_id_group = queue_in.get(block=True)
if tax_id_group is None:
break
tax_id, group = tax_id_group
try:
fit_result = fit_single_group(cfg, group, mcmc_PMD, mcmc_null)
queue_out.put((tax_id, fit_result))
except TimeoutError:
queue_out.put((tax_id, TimeoutError))
fit_single_group = fit_single_group_subsequent_fits
def compute_fits_parallel_with_progressbar(cfg, df_mismatch):
# logger.info(f"Fit: Initializing fit in parallel with progressbar")
groupby = get_groupby(df_mismatch)
N_groupby = len(groupby)
N_cores = cfg.N_cores if cfg.N_cores < N_groupby else N_groupby
manager = Manager()
queue_in = manager.Queue()
queue_out = manager.Queue()
the_pool = Pool(N_cores, worker, (cfg, queue_in, queue_out))
d_fit_results = {}
task_fit = progress.add_task(
"task_status_fitting",
progress_type="known_total",
status="Fitting",
name="Fits: ",
total=N_groupby,
)
for tax_id, group in groupby:
queue_in.put((tax_id, group))
# Get and print results
for _ in range(N_groupby):
tax_id, fit_result = queue_out.get()
if fit_result is not TimeoutError:
d_fit_results[tax_id] = fit_result
else:
logger.warning(f"Fit: Timeout at tax_id {tax_id}. Skipping for now")
progress.advance(task_fit)
for _ in range(N_groupby):
queue_in.put(None)
# prevent adding anything more to the process pool and wait for all processes to finish
the_pool.close()
the_pool.join()
return d_fit_results
#%%
def match_tax_id_order_in_df_fit_results(df_fit_results, df_mismatch):
    tax_ids_all = pd.unique(df_mismatch.tax_id)  # API: pandas.unique
import datetime
import logging
# import warnings
import pandas as pd
from markdown2 import markdown
from tabulate import tabulate
tabulate.PRESERVE_WHITESPACE = True
logging.getLogger(__name__)
__author__ = "<NAME>, <NAME> <<EMAIL>>, <NAME>"
__copyright__ = "Copyright \xa9 2018. The University of Chicago. All Rights Reserved."
#
# DISCLAIMER: these methods were developed with a particular version of the webapp in mind. They lack flexibility
# (lots of hardcoded things).
# If new features (fairness measures, etc.) are added to the webapp, this will need to change substantially.
#
def get_group_value_report(group_value_df):
"""
:param group_value_df:
:return:
"""
group_value_report = {}
the_false_df = group_value_df.loc[(group_value_df['Unsupervised Fairness'] == False) | (
group_value_df['Supervised Fairness'] == False)]
for index, row in the_false_df.iterrows():
report = ''
metrics = []
group = row['group_variable'] + ' = ' + row['group_value']
text1 = group + ' does not have '
report += text1
text2 = ''
if row['Unsupervised Fairness'] is False:
text2 += 'Unsupervised Fairness '
text3 = ''
if row['Statistical Parity'] is False:
text3 += '(no Statistical Parity'
ref_val = 0.0
ref_group_value = group_value_df.loc[(group_value_df['group_variable'] == row[
'group_variable']) & (group_value_df['group_value'] == row[
'ppr_ref_group_value'])]['ppr'].values[0]
ppr_text = '{:.0f}% of the group is selected, compared to {:.0f} % of the ' \
'reference group '.format(row['ppr'] * 100, ref_group_value * 100) + \
row['group_variable'] + ' = ' + row['ppr_ref_group_value']
metrics.append(ppr_text)
if row['Impact Parity'] is False:
if text3 == '':
text3 += '(no Impact Parity)'
else:
text3 += ', no Impact Parity)'
pprev_text = ''
else:
text3 += ')'
text2 += text3
if row['Supervised Fairness'] is False:
if text2 != '':
text2 += ' neither '
text2 += 'Supervised Fairness '
text3 = ''
if row['TypeI Parity'] is False:
text3 += '(no Type I Parity'
if row['TypeII Parity'] is False:
if text3 == '':
text3 += '(no Type II Parity)'
else:
text3 += ', no Type II Parity)'
else:
text3 += ') '
text2 += text3
report += text2
group_value_report[group] = [report, metrics]
return group_value_report
def get_highlevel_report(group_attribute_df):
group_attribute_df = group_attribute_df.applymap(str)
cols = ['attribute_name']
if 'Unsupervised Fairness' in group_attribute_df.columns:
cols.append('Unsupervised Fairness')
if 'Supervised Fairness' in group_attribute_df.columns:
cols.append('Supervised Fairness')
group_attribute_df = group_attribute_df[cols]
map = {}
attr_list = group_attribute_df['attribute_name'].unique()
for col in group_attribute_df.columns:
if col == 'attribute_name':
colstr = 'Attribute'
else:
colstr = col
map[col] = colstr
# to be able to click on true/false and redirect to the next section
if col != 'attribute_name':
for attr in attr_list:
group_attribute_df.loc[group_attribute_df['attribute_name'] == attr, col] = '[' + group_attribute_df[col][
group_attribute_df['attribute_name'] == attr] + ']' + '(#' + '-'.join(attr.lower().split(' ')) + ')'
for attr in attr_list:
group_attribute_df = group_attribute_df.replace(attr, '[' + attr + ']' + '(#' + '-'.join(attr.lower().split(' ')) + ')')
group_attribute_df = group_attribute_df.rename(index=str, columns=map)
highlevel_report = tabulate(group_attribute_df, headers='keys', tablefmt='pipe', showindex='never', numalign="left")
return highlevel_report
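# Minimal hedged example (toy data) of the tabulate call pattern used throughout this
# module: a DataFrame rendered as a Markdown pipe table with the index suppressed, and
# cell values carrying Markdown links so the rendered report can jump between sections.
def _tabulate_example():
    toy = pd.DataFrame({'Attribute': ['race', 'sex'],
                        'Supervised Fairness': ['[False](#race)', '[True](#sex)']})
    return tabulate(toy, headers='keys', tablefmt='pipe', showindex='never', numalign="left")
    # -> a "| Attribute | Supervised Fairness |"-style Markdown table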
def get_parity_group_report(group_value_df, attribute, fairness_measures, fairness_measures_depend):
group_value_df = group_value_df.round(2)
group_value_df = group_value_df.applymap(str)
def_cols = ['attribute_value']
# make copy of relevant rows as new df
aux_df = group_value_df.loc[group_value_df['attribute_name'] == attribute].copy()
metrics = {}
for par, disp in fairness_measures_depend.items():
if par in fairness_measures:
metrics[par] = disp
# getting a reference group label
for col in aux_df.columns:
if col in metrics.keys():
ref_group = metrics[col].replace('_disparity', '_ref_group_value')
# set value in rows of new df for reference group equal to Ref
indicate_ref = lambda x, y: x if x != y else 'Ref'
aux_df.loc[:, col] = aux_df[['attribute_value', ref_group]].apply(lambda x: indicate_ref(*x), axis=1)
map = {}
aux_df = aux_df[def_cols + fairness_measures]
for col in aux_df.columns:
if col == 'attribute_value':
colstr = 'Attribute Value'
else:
colstr = col
map[col] = colstr #+ ' '
aux_df.loc[:, col] = '[' + aux_df[col] + ']' + '(#' + '-'.join(attribute.lower().split(' ')) + '-2)'
aux_df = aux_df.rename(index=str, columns=map)
cols_order = ['Attribute Value', 'Statistical Parity', 'Impact Parity', 'FDR Parity', 'FPR Parity', 'FOR Parity',
'FNR Parity']
new_order = [col for col in cols_order if col in aux_df.columns]
aux_df = aux_df[new_order]
parity_group = tabulate(aux_df,
headers='keys',
tablefmt='pipe', showindex='never', numalign="left")
return parity_group
def setup_group_value_df(group_value_df, fairness_measures, fairness_measures_depend):
group_value_df = group_value_df.round(2)
group_value_df = group_value_df.applymap(str)
group_size = group_value_df['group_size_pct']
metrics = {}
for par, disp in fairness_measures_depend.items():
if par in fairness_measures:
metrics[disp] = par
metrics[disp.replace('_disparity', '')] = par
aux_df = group_value_df[['attribute_name', 'attribute_value'] + list(metrics.keys())]
for col in group_value_df.columns:
if col in metrics.keys():
# we want to keep the ref group without green/red so we need to know the name of the column to search for
if not col.endswith('_disparity'):
ref_group = col + '_ref_group_value'
else:
ref_group = col.replace('_disparity', '_ref_group_value')
group_value_df.loc[(group_value_df[metrics[col]] == 'True') & (group_value_df['attribute_value'] != group_value_df[
ref_group]), col] = '##green## ' + group_value_df[col][group_value_df[metrics[col]] == 'True']
group_value_df.loc[group_value_df[metrics[col]] == 'False', col] = '##red##' + group_value_df[col][group_value_df[
metrics[
col]] == 'False']
# group_value_df['group_size_pct'] = group_size
# print('**********GROUP SIZES********\n', group_value_df['group_size_pct'])
return group_value_df
def get_disparities_group_report(group_value_df, attribute, fairness_measures, fairness_measures_depend):
def_cols = ['attribute_value']
metrics = {}
for par, disp in fairness_measures_depend.items():
if par in fairness_measures:
metrics[disp] = par
aux_df = group_value_df.loc[group_value_df['attribute_name'] == attribute]
aux_df = aux_df[def_cols + list(metrics.keys())]
map = {}
for col in aux_df.columns:
colstr = col.replace('_', ' ')
if col == 'attribute_value':
colstr = 'Attribute Value'
else:
colstr = colstr.split(' ')[0].upper() + ' Disparity'
map[col] = colstr #+ ' '
aux_df.loc[:,col] = '[' + aux_df[col] + ']' + '(#' + '-'.join(attribute.lower().split(' ')) + '-3)'
aux_df = aux_df.rename(index=str, columns=map)
# this is hardcoded. If metrics supported by aequitas change this needs to change
cols_order = ['Attribute Value', 'PPR Disparity', 'PPREV Disparity', 'FDR Disparity', 'FPR Disparity', 'FOR Disparity',
'FNR Disparity']
new_order = [col for col in cols_order if col in aux_df.columns]
aux_df = aux_df[new_order]
disparities_group = tabulate(aux_df,
headers='keys',
tablefmt='pipe', showindex='never', numalign="left")
return disparities_group
def get_group_group_report(group_value_df, attribute, fairness_measures, fairness_measures_depend):
# defining how to display stuff
names = {'attribute_value': 'Attribute Value',
'group_size_pct': 'Group Size Ratio'}
def_cols = ['attribute_value', 'group_size_pct']
for par, disp in fairness_measures_depend.items():
if par in fairness_measures:
def_cols.append(disp.replace('_disparity', ''))
aux_df = group_value_df.loc[group_value_df['attribute_name'] == attribute]
aux_df = aux_df[def_cols]
aux_df = aux_df.round(2)
aux_df = aux_df.astype(str)
# fixing the same order of columns every time!
cols_order = ['attribute_value', 'group_size_pct', 'ppr', 'pprev', 'fdr', 'fpr', 'for', 'fnr']
new_order = [col for col in cols_order if col in aux_df.columns]
aux_df = aux_df[new_order]
map = {}
for col in aux_df.columns:
if col in names:
colstr = names[col]
else:
colstr = col.upper()
map[col] = colstr #+ ' '
aux_df = aux_df.rename(index=str, columns=map)
group_group = tabulate(aux_df,
headers='keys',
tablefmt='pipe', showindex='never', numalign="left")
return group_group
def get_sentence_highlevel(fair_results):
    sent = '**The Bias Report audited the risk assessment system and has found that it has'
is_fair = ' passed' if fair_results['Overall Fairness'] is True else ' failed'
sent += is_fair + ' the audit with respect to the following fairness criteria:**\n\n'
return sent
def get_statpar_text(group_value_df, fairness_measures_depend):
group_value_df = group_value_df.round(2)
group_value_df = group_value_df.applymap(str)
fairness_metric = 'Statistical Parity'
false_df = group_value_df.loc[group_value_df[fairness_metric] == 'False']
bias_metric = fairness_measures_depend[fairness_metric]
group_metric = bias_metric.replace('_disparity', '')
ref_group_col = group_metric + '_ref_group_value'
text_detail = ''
false_dict = {}
for index, row in false_df.iterrows():
ref_group_row = group_value_df.loc[(group_value_df['attribute_name'] == row['attribute_name']) &
(group_value_df['attribute_value'] == row[ref_group_col])]
sentence = ' is' \
' **{group_metric_value}**% of positive class.' \
'' \
''.format(
group_metric_value='%.0f' % (float(row[group_metric]) * 100),
)
try:
false_dict[row['attribute_name']].add('[' + row['attribute_value'] + '](#equal-parity)' + sentence)
except KeyError:
false_dict[row['attribute_name']] = set()
false_dict[row['attribute_name']].add('[' + row['attribute_value'] + '](#equal-parity)' + sentence)
if false_df.empty:
cellref = '##green##Based on the fairness threshold used, the number of selected positives is similar across ' \
'different ' \
'groups.\n\n'
else:
cellref = ''
for key in false_dict.keys():
cellref += '**{attribute_name}:** ##br##   '.format(attribute_name=key)
cellref += '##br##   '.join(false_dict[key]) + ' ##br##'
return cellref
def get_impact_text(group_value_df, fairness_measures_depend):
group_value_df = group_value_df.round(2)
group_value_df = group_value_df.applymap(str)
fairness_metric = 'Impact Parity'
false_df = group_value_df.loc[group_value_df[fairness_metric] == 'False']
bias_metric = fairness_measures_depend[fairness_metric]
group_metric = bias_metric.replace('_disparity', '')
ref_group_col = group_metric + '_ref_group_value'
text_detail = ''
false_dict = {}
for index, row in false_df.iterrows():
ref_group_row = group_value_df.loc[(group_value_df['attribute_name'] == row['attribute_name']) &
(group_value_df['attribute_value'] == row[ref_group_col])]
sentence = ': **{group_metric_value}**% of the group is in the selected set (classified as positive),' \
' in comparison to {ref_group_metric_value}% from the reference group \"{' \
'ref_group_value}\"' \
''.format(
group_metric_value='%.0f' % (float(row[group_metric]) * 100),
attribute_value=row['attribute_value'],
ref_group_metric_value='%.0f' % (float(ref_group_row[group_metric].values[0]) * 100),
ref_group_value=row[ref_group_col])
try:
false_dict[row['attribute_name']].add('[' + row['attribute_value'] + '](#proportional-parity)' + sentence)
except KeyError:
false_dict[row['attribute_name']] = set()
false_dict[row['attribute_name']].add('[' + row['attribute_value'] + '](#proportional-parity)' + sentence)
if false_df.empty:
cellref = '##green##Based on the fairness threshold used, the percentage of selected individuals from ' \
'each group is not disparate to the percentage of selected individuals of the respective reference group.\n\n'
else:
cellref = ''
for key in false_dict.keys():
cellref += '**{attribute_name}:** ##br##   '.format(attribute_name=key)
cellref += '##br##   '.join(false_dict[key]) + ' ##br##'
return cellref
def get_old_false_text(group_value_df, fairness_metric, fairness_measures_depend):
names = {
'fpr': 'false positive rate',
'fnr': 'false negative rate',
'fdr': 'false discovery rate',
'for': 'false omission rate',
'ppr': 'predicted positive ratio',
'pprev': 'predicted prevalence (percentage of positives in the group)'
}
group_value_df = group_value_df.round(2)
group_value_df = group_value_df.applymap(str)
false_df = group_value_df.loc[group_value_df[fairness_metric] == 'False']
bias_metric = fairness_measures_depend[fairness_metric]
group_metric = bias_metric.replace('_disparity', '')
ref_group_col = group_metric + '_ref_group_value'
text_detail = ''
false_dict = {}
for index, row in false_df.iterrows():
ref_group_row = group_value_df.loc[(group_value_df['attribute_name'] == row['attribute_name']) &
(group_value_df['attribute_value'] == row[ref_group_col])]
sentence = ': **{bias_metric_value}**% ' \
'of the {group_metric_name} of the reference group \"{ref_group_value}\",' \
' corresponding to a difference of {group_metric_value} vs {ref_group_metric_value}.' \
.format(
group_metric_name=names[group_metric],
bias_metric_value='%.0f' % (float(row[bias_metric]) * 100),
ref_group_value=row[ref_group_col],
group_metric_value=row[group_metric],
ref_group_metric_value=ref_group_row[group_metric].values[0])
try:
false_dict[row['attribute_name']].add('[' + row['attribute_value'] + '](#false-positive-parity)' + sentence)
except KeyError:
false_dict[row['attribute_name']] = set()
false_dict[row['attribute_name']].add('[' + row['attribute_value'] + '](#false-positive-parity)' + sentence)
if false_df.empty:
cellref = '##green##Based on the fairness threshold used, the percentage of selected elements from ' \
'each group is not disparate to the percentage of selected elements of the respective reference group.\n\n'
else:
cellref = ''
for key in false_dict.keys():
cellref += '**{attribute_name}:** ##br##   '.format(attribute_name=key)
cellref += '##br##   '.join(false_dict[key]) + ' ##br##'
return cellref
def get_false_text(group_value_df, fairness_metric, fairness_measures_depend):
names = {
'fpr': 'false positive rate',
'fnr': 'false negative rate',
'fdr': 'false discovery rate',
'for': 'false omission rate',
'ppr': 'predicted positive ratio',
'pprev': 'predicted prevalence (percentage of positives in the group)'
}
group_value_df = group_value_df.round(2)
group_value_df = group_value_df.applymap(str)
false_df = group_value_df.loc[group_value_df[fairness_metric] == 'False']
bias_metric = fairness_measures_depend[fairness_metric]
group_metric = bias_metric.replace('_disparity', '')
ref_group_col = group_metric + '_ref_group_value'
text_detail = ''
false_dict = {}
ref_group_dict = {}
for index, row in false_df.iterrows():
ref_group_row = group_value_df.loc[(group_value_df['attribute_name'] == row['attribute_name']) &
(group_value_df['attribute_value'] == row[ref_group_col])]
sentence = ' with ++span-red-init++{bias_metric_value}X++span-red-end++ Disparity'.format(
bias_metric_value='%.2f' % float(row[bias_metric]))
ref_group_dict[row['attribute_name']] = ' (with reference group as **' + row[ref_group_col] + '**)'
sentence2 = '{group_metric_name} of this group is {group_metric_value} compared to {ref_group_metric_value} for the reference group {ref_group_value}.' \
.format(
group_metric_name=names[group_metric],
ref_group_value=row[ref_group_col],
group_metric_value=row[group_metric],
ref_group_metric_value=ref_group_row[group_metric].values[0])
try:
false_dict[row['attribute_name']].add('##tooltip-start-title##' + sentence2 + '##tooltip-end-title##' + row[
'attribute_value'] + '##tooltip-end-anchor##' +
sentence)
except KeyError:
false_dict[row['attribute_name']] = set()
false_dict[row['attribute_name']].add('##tooltip-start-title##' + sentence2 + '##tooltip-end-title##' + row[
'attribute_value'] + '##tooltip-end-anchor##' +
sentence)
if false_df.empty:
cellref = '++span-green-init++Based on the fairness threshold used, all groups passed the audit for this metric.++span-green-end++\n\n'
else:
cellref = ''
for key in false_dict.keys():
cellref += '**For {attribute_name}**'.format(attribute_name=key) + ref_group_dict[key] + '##br##   '
cellref += '##br##   '.join(false_dict[key]) + ' ##br## ##br##'
return cellref
def get_highlevel_table(group_value_df, fairness_measures, ):
supported_name = {'Statistical Parity': '[Equal Parity](#equal-parity)',
'Impact Parity': '[Proportional Parity](#proportional-parity)',
'TypeI Parity': '[False Positive Parity](#false-positive-parity)',
'TypeII Parity': '[False Negative Parity](#false-negative-parity)'}
supported_outcome = {'Statistical Parity': 'Each group is represented equally.',
'Impact Parity': 'Each group is represented proportional to their representation in the overall population.',
'TypeI Parity': 'Each group has proportionally equal false positive errors made by the model.',
'TypeII Parity': 'Each group has proportionally equal false negative errors made by the model.'}
supported_order = ['Statistical Parity', 'Impact Parity', 'TypeI Parity', 'TypeII Parity']
# once again this is hardcoded because it's easy now, in the future make it mapped automatically
map_ref_groups = {'Statistical Parity': ['ppr_ref_group_value'],
'Impact Parity': ['pprev_ref_group_value'],
'TypeI Parity': ['fpr_ref_group_value', 'fdr_ref_group_value'],
'TypeII Parity': ['fnr_ref_group_value', 'for_ref_group_value']}
key_columns = ['model_id', 'score_threshold', 'attribute_name']
fairness_measures_edited = []
for meas in fairness_measures:
if meas in ['FPR Parity', 'FDR Parity']:
fairness_measures_edited.append('TypeI Parity')
elif meas in ['FNR Parity', 'FOR Parity']:
fairness_measures_edited.append('TypeII Parity')
else:
fairness_measures_edited.append(meas)
fairness_measures_edited = set(fairness_measures_edited)
raw = {
'Fairness Criteria': [],
'Desired Outcome': [],
'Reference Groups Selected': [],
'Unfairly Affected Groups': []
}
for measure in supported_order:
if measure in fairness_measures_edited:
raw['Fairness Criteria'].append(supported_name[measure])
raw['Desired Outcome'].append(supported_outcome[measure])
false_df = group_value_df.loc[group_value_df[measure] == False]
ref_dict = {}
false_dict = {}
for index, row in false_df.iterrows():
try:
false_dict[row['attribute_name']].add('[' + row['attribute_value'] + ']' + \
supported_name[measure][supported_name[measure].find('('):])
except KeyError:
false_dict[row['attribute_name']] = set()
false_dict[row['attribute_name']].add('[' + row['attribute_value'] + ']' + \
supported_name[measure][supported_name[measure].find('('):])
attr_order = []
if len(false_dict) > 0:
cell = ''
attr_order = false_dict.keys()
for key in attr_order:
cell += '**{attribute_name}:**'.format(attribute_name=key)
cell += '##br##   '
cell += '##br##   '.join(false_dict[key]) + ' ##br##'
raw['Unfairly Affected Groups'].append(cell)
else:
if group_value_df[measure].isnull().all():
raw['Unfairly Affected Groups'].append('Undefined')
else:
raw['Unfairly Affected Groups'].append('No Unfair Groups Found')
for ref in map_ref_groups[measure]:
groupby_refs = group_value_df.groupby(key_columns + [ref])
for group, values in groupby_refs:
try:
ref_dict[group[key_columns.index('attribute_name')]].add('[' + group[-1] + '](' + '-'.join(
supported_name[
measure]
.lower().split(' ')) + ')')
except KeyError:
ref_dict[group[key_columns.index('attribute_name')]] = set()
ref_dict[group[key_columns.index('attribute_name')]].add('[' + group[-1] + '](' + '-'.join(
supported_name[
measure].lower().split(
' ')) + ')')
cellref = ''
align_rows = True if attr_order else False
refs_order = attr_order if attr_order else ref_dict.keys()
newline = '##br##'
idented = '   '
for key in refs_order:
cellref += '**{attribute_name}:**'.format(attribute_name=key) + newline
cellref += idented + list(ref_dict[key])[0] + ' ##br##'
if align_rows:
cellref += ''.join([newline] * (len(false_dict[key]) - 1)) # this is the number of lines to skip in cell
else:
cellref += newline
raw['Reference Groups Selected'].append(cellref)
highlevel_table = '\n\n'
if len(raw['Fairness Criteria']) > 0:
landf = pd.DataFrame(raw, columns=['Fairness Criteria', 'Desired Outcome', 'Reference Groups Selected',
'Unfairly Affected Groups'])
        # keep the same column order
highlevel_table = tabulate(landf[['Fairness Criteria', 'Desired Outcome', 'Reference Groups Selected',
'Unfairly Affected Groups']], headers='keys',
tablefmt='pipe', showindex='never', numalign="left")
return highlevel_table
def audit_summary(configs, group_value_df):
supported_name2 = {'Statistical Parity': 'Equal Parity',
'Impact Parity': 'Proportional Parity',
'FPR Parity': 'False Positive Rate Parity',
'FDR Parity': 'False Discovery Rate Parity',
'FNR Parity': 'False Negative Rate Parity',
'FOR Parity': 'False Omission Rate Parity'}
    supported_name = {'Statistical Parity': '**Equal Parity** - Ensure all protected groups have equal representation in the selected set.',
                      'Impact Parity': '**Proportional Parity** - Ensure all protected groups are selected proportional to their '
                                       'percentage of the population.',
                      'FPR Parity': '**False Positive Rate Parity** - Ensure all protected groups have the same false positive '
                                    'rates as the reference group.',
                      'FDR Parity': '**False Discovery Rate Parity** - Ensure all protected groups have equally proportional false '
                                    'positives within the selected set (compared to the reference group).',
                      'FNR Parity': '**False Negative Rate Parity** - Ensure all protected groups have the same false negative '
                                    'rates as the reference group.',
                      'FOR Parity': '**False Omission Rate Parity** - Ensure all protected groups have equally proportional false '
                                    'negatives within the non-selected set (compared to the reference group).'}
raw = {
'column1': [],
'column2': [],
'column3': []
}
supported_order = ['Statistical Parity', 'Impact Parity', 'FPR Parity', 'FDR Parity', 'FNR Parity', 'FOR Parity']
measures_results_dict = {}
for measure in supported_order:
if measure in configs.fair_measures_requested:
raw['column1'].append(supported_name[measure])
false_df = group_value_df.loc[group_value_df[measure] == False]
if false_df.empty:
link = ' [Details](#' + '-'.join(supported_name2[measure].lower().split(' ')) + \
'-span-green-initpassedspan-green-end)'
raw['column2'].append('++span-green-init++**Passed**++span-green-end++')
raw['column3'].append(link)
measures_results_dict[measure] = '++span-green-init++Passed++span-green-end++'
else:
link = ' [Details](#' + '-'.join(supported_name2[measure].lower().split(' ')) + \
'-span-red-initfailedspan-red-end)'
raw['column2'].append('++span-red-init++**Failed**++span-red-end++')
raw['column3'].append(link)
measures_results_dict[measure] = '++span-red-init++Failed++span-red-end++'
df = pd.DataFrame(raw, columns=['column1', 'column2', 'column3'])
summ_table = tabulate(df[['column1', 'column2', 'column3']], headers='keys',
tablefmt='pipe', showindex='never', numalign="left")
return summ_table, measures_results_dict
def audit_description(configs, group_value_df):
    supported_name = {'Statistical Parity': '**Equal Parity** - Ensure all protected groups have equal representation in the selected set.',
                      'Impact Parity': '**Proportional Parity** - Ensure all protected groups are selected proportional to their '
                                       'percentage of the population.',
                      'FPR Parity': '**False Positive Rate Parity** - Ensure all protected groups have the same false positive '
                                    'rates as the reference group.',
                      'FDR Parity': '**False Discovery Rate Parity** - Ensure all protected groups have equally proportional false '
                                    'positives within the selected set (compared to the reference group).',
                      'FNR Parity': '**False Negative Rate Parity** - Ensure all protected groups have the same false negative '
                                    'rates as the reference group.',
                      'FOR Parity': '**False Omission Rate Parity** - Ensure all protected groups have equally proportional false '
                                    'negatives within the non-selected set (compared to the reference group).'}
supported_order = ['Statistical Parity', 'Impact Parity', 'FPR Parity', 'FDR Parity', 'FNR Parity', 'FOR Parity']
    ref_group = {'predefined': 'Custom group - The reference groups you selected for each attribute will be used to '
                               'calculate relative disparities in this audit.',
                 'majority': 'Majority group - The largest group on each attribute will be used as the baseline to '
                             'calculate relative disparities in this audit.',
                 'min_metric': 'Automatically selected group - For each bias metric, the group on each attribute with the '
                               'lowest value will be used as the baseline to calculate relative disparities in this audit.'
                 }
raw = {
'column1': [],
'column2': []
}
raw['column1'].append('**Audit Date:**')
raw['column2'].append(datetime.date.today().strftime('%d %b %Y'))
raw['column1'].append('**Data Audited:**')
raw['column2'].append('{:.0f} rows'.format(group_value_df['total_entities'].values[0]))
raw['column1'].append('**Attributes Audited:**')
raw['column2'].append(', '.join(group_value_df['attribute_name'].unique()))
raw['column1'].append('**Audit Goal(s):**')
measures = [supported_name[m] for m in supported_order if m in configs.fair_measures_requested]
raw['column2'].append('\n'.join(measures) + '\n')
raw['column1'].append('**Reference Groups:**')
raw['column2'].append(ref_group[configs.ref_groups_method])
raw['column1'].append('**Fairness Threshold:**')
    thresh = ('{:.0f}%. If disparity for a group is within {:.0f}% and {:.0f}% of '
              'the value of the reference group on a group metric (e.g. False '
              'Positive Rate), this audit will pass.').format(
        float(configs.fairness_threshold) * 100,
        float(configs.fairness_threshold) * 100,
        float(1.0 / configs.fairness_threshold) * 100)
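    # Worked example of the arithmetic above (illustrative threshold, not taken
    # from the source): with configs.fairness_threshold == 0.8 the sentence reads
    # "80%. If disparity for a group is within 80% and 125% of the value of the
    # reference group ...", since float(1.0 / 0.8) * 100 == 125.0.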
raw['column2'].append(thresh)
df =
|
pd.DataFrame(raw, columns=['column1', 'column2'])
|
pandas.DataFrame
|
"""
Copyright 2020 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import pandas as pd
import pytest
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures_reports as mr
from gs_quant.api.gs.assets import GsTemporalXRef
from gs_quant.api.gs.data import MarketDataResponseFrame
from gs_quant.data.core import DataContext
from gs_quant.errors import MqValueError
from gs_quant.markets.report import PerformanceReport, ThematicReport
from gs_quant.markets.securities import Stock
from gs_quant.models.risk_model import FactorRiskModel as Factor_Risk_Model
from gs_quant.target.common import ReportParameters, XRef
from gs_quant.target.reports import Report, PositionSourceType, ReportType
from gs_quant.target.risk_models import RiskModel, CoverageType, Term, UniverseIdentifier
risk_model = RiskModel(coverage=CoverageType.Country, id_='model_id', name='Fake Risk Model',
term=Term.Long, universe_identifier=UniverseIdentifier.gsid, vendor='GS',
version=1.0)
factor_risk_report = Report(position_source_id='position source id',
position_source_type=PositionSourceType.Portfolio,
type_=ReportType.Portfolio_Factor_Risk,
id_='report_id',
parameters=ReportParameters(risk_model='risk_model_id'),
status='new')
ppa_report = Report(position_source_id='position source id',
position_source_type=PositionSourceType.Portfolio,
type_=ReportType.Portfolio_Performance_Analytics,
id_='report_id',
parameters=ReportParameters(risk_model='risk_model_id'),
status='new')
factor_data = [
{
'date': '2020-11-23',
'reportId': 'report_id',
'factor': 'factor_id',
'factorCategory': 'CNT',
'pnl': 11.23,
'exposure': -11.23,
'proportionOfRisk': 1
},
{
'date': '2020-11-24',
'reportId': 'report_id',
'factor': 'factor_id',
'factorCategory': 'CNT',
'pnl': 11.24,
'exposure': -11.24,
'proportionOfRisk': 2
},
{
'date': '2020-11-25',
'reportId': 'report_id',
'factor': 'factor_id',
'factorCategory': 'CNT',
'pnl': 11.25,
'exposure': -11.25,
'proportionOfRisk': 3
}
]
aggregate_factor_data = [
{
'date': '2020-11-23',
'reportId': 'report_id',
'factor': 'Factor',
'factorCategory': 'CNT',
'pnl': 11.23,
'exposure': -11.23,
'proportionOfRisk': 1,
'dailyRisk': 1,
'annualRisk': 1
},
{
'date': '2020-11-24',
'reportId': 'report_id',
'factor': 'Factor',
'factorCategory': 'CNT',
'pnl': 11.24,
'exposure': -11.24,
'proportionOfRisk': 2,
'dailyRisk': 2,
'annualRisk': 2
},
{
'date': '2020-11-25',
'reportId': 'report_id',
'factor': 'Factor',
'factorCategory': 'CNT',
'pnl': 11.25,
'exposure': -11.25,
'proportionOfRisk': 3,
'dailyRisk': 3,
'annualRisk': 3
}
]
constituents_data_l_s = {
'assetId': [
"MA1",
"MA1",
"MA1",
"MA2",
"MA2",
"MA2"
],
'quantity': [
-1,
-2,
-3,
1,
2,
3
],
'netExposure': [
-1,
-2,
-3,
1,
2,
3
],
'pnl': [
0,
-1,
-1,
0,
1,
1
],
'date': [
'2020-01-02',
'2020-01-03',
'2020-01-04',
'2020-01-02',
'2020-01-03',
'2020-01-04'
]
}
constituents_data = {
'netExposure': [
1,
2,
3
],
'assetId': [
"MA",
"MA",
"MA"
],
'quantity': [
1,
1,
1
],
'pnl': [
0,
1,
1
],
'date': [
'2020-01-02',
'2020-01-03',
'2020-01-04'
]
}
thematic_data = [
{
"date": "2021-07-12",
"reportId": "PTAID",
"basketId": "MA01GPR89HZF1FZ5",
"region": "Asia",
"grossExposure": 3.448370345015856E8,
"thematicExposure": 2,
"thematicBeta": 1,
"updateTime": "2021-07-20T23:43:38Z"
},
{
"date": "2021-07-13",
"reportId": "PTAID",
"basketId": "MA01GPR89HZF1FZ5",
"region": "Asia",
"grossExposure": 3.375772519907556E8,
"thematicExposure": 2,
"thematicBeta": 1,
"updateTime": "2021-07-20T23:43:38Z"
},
{
"date": "2021-07-14",
"reportId": "PTAID",
"basketId": "MA01GPR89HZF1FZ5",
"region": "Asia",
"grossExposure": 3.321189950666118E8,
"thematicExposure": 2,
"thematicBeta": 1,
"updateTime": "2021-07-20T23:43:38Z"
},
{
"date": "2021-07-15",
"reportId": "PTAID",
"basketId": "MA01GPR89HZF1FZ5",
"region": "Asia",
"grossExposure": 3.274071805135091E8,
"thematicExposure": 2,
"thematicBeta": 1,
"updateTime": "2021-07-20T23:43:38Z"
}
]
def mock_risk_model():
risk_model = RiskModel(coverage=CoverageType.Country, id_='model_id', name='Fake Risk Model',
term=Term.Long, universe_identifier=UniverseIdentifier.gsid, vendor='GS',
version=1.0)
replace = Replacer()
# mock getting risk model entity()
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model', Mock())
mock.return_value = risk_model
actual = Factor_Risk_Model.get(model_id='model_id')
replace.restore()
return actual
def test_factor_exposure():
replace = Replacer()
# mock getting risk model entity()
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model', Mock())
mock.return_value = risk_model
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
mock.return_value = factor_risk_report
# mock getting report factor data
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_factor_risk_report_results', Mock())
mock.return_value = factor_data
# mock getting risk model dates
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model_dates', Mock())
mock.return_value = ['2010-01-01']
# mock getting risk model factor category
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_data', Mock())
mock.return_value = {
'results': [{
'factorData': [{
'factorId': 'factor_id',
'factorCategory': 'Factor Name'
}]}
]}
# mock getting risk model factor entity
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_factor_data', Mock())
mock.return_value = [{
'identifier': 'factor_id',
'type': 'Factor',
'name': 'Factor Name',
'factorCategory': 'Factor Name'
}]
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
actual = mr.factor_exposure('report_id', 'Factor Name')
assert all(actual.values == [-11.23, -11.24, -11.25])
with pytest.raises(MqValueError):
mr.factor_exposure('report_id', 'Wrong Factor Name')
replace.restore()
def test_factor_pnl():
replace = Replacer()
# mock getting risk model entity()
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model', Mock())
mock.return_value = risk_model
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
mock.return_value = factor_risk_report
# mock getting report factor data
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_factor_risk_report_results', Mock())
mock.return_value = factor_data
# mock getting risk model dates
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model_dates', Mock())
mock.return_value = ['2010-01-01']
# mock getting risk model factor category
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_data', Mock())
mock.return_value = {
'results': [{
'factorData': [{
'factorId': 'factor_id',
'factorCategory': 'Factor Name'
}]}
]}
# mock getting risk model factor entity
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_factor_data', Mock())
mock.return_value = [{
'identifier': 'factor_id',
'type': 'Factor',
'name': 'Factor Name',
'factorCategory': 'Factor Name'
}]
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
actual = mr.factor_pnl('report_id', 'Factor Name')
assert all(actual.values == [11.23, 11.24, 11.25])
with pytest.raises(MqValueError):
mr.factor_pnl('report_id', 'Wrong Factor Name')
replace.restore()
def test_factor_proportion_of_risk():
replace = Replacer()
# mock getting risk model entity()
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model', Mock())
mock.return_value = risk_model
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
mock.return_value = factor_risk_report
# mock getting report factor data
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_factor_risk_report_results', Mock())
mock.return_value = factor_data
# mock getting risk model dates
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model_dates', Mock())
mock.return_value = ['2010-01-01']
# mock getting risk model factor category
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_data', Mock())
mock.return_value = {
'results': [{
'factorData': [{
'factorId': 'factor_id',
'factorCategory': 'Factor Name'
}]}
]}
# mock getting risk model factor entity
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_factor_data', Mock())
mock.return_value = [{
'identifier': 'factor_id',
'type': 'Factor',
'name': 'Factor Name',
'factorCategory': 'Factor Name'
}]
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
actual = mr.factor_proportion_of_risk('report_id', 'Factor Name')
assert all(actual.values == [1, 2, 3])
with pytest.raises(MqValueError):
mr.factor_proportion_of_risk('report_id', 'Wrong Factor Name')
replace.restore()
def test_get_factor_data():
replace = Replacer()
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
mock.return_value = ppa_report
with pytest.raises(MqValueError):
mr.factor_proportion_of_risk('report_id', 'Factor Name')
replace.restore()
def test_aggregate_factor_support():
replace = Replacer()
# mock getting risk model entity()
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model', Mock())
mock.return_value = risk_model
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_report', Mock())
mock.return_value = factor_risk_report
# mock getting report factor data
mock = replace('gs_quant.api.gs.reports.GsReportApi.get_factor_risk_report_results', Mock())
mock.return_value = aggregate_factor_data
# mock getting risk model dates
mock = replace('gs_quant.api.gs.risk_models.GsRiskModelApi.get_risk_model_dates', Mock())
mock.return_value = ['2010-01-01']
# mock getting risk model factor category
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_data', Mock())
mock.return_value = {
'results': [{
'factorData': [{
'factorId': 'factor_id',
'factorCategory': 'Factor Name'
}]}
]}
# mock getting risk model factor entity
mock = replace('gs_quant.api.gs.risk_models.GsFactorRiskModelApi.get_risk_model_factor_data', Mock())
mock.return_value = [{
'identifier': 'factor_id',
'type': 'Factor',
'name': 'Factor Name',
'factorCategory': 'Factor Name'
}]
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
actual = mr.factor_proportion_of_risk('report_id', 'Factor')
assert all(actual.values == [1, 2, 3])
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
actual = mr.daily_risk('report_id', 'Factor')
assert all(actual.values == [1, 2, 3])
with DataContext(datetime.date(2020, 11, 23), datetime.date(2020, 11, 25)):
actual = mr.annual_risk('report_id', 'Factor')
assert all(actual.values == [1, 2, 3])
with pytest.raises(MqValueError):
mr.daily_risk('report_id', 'Factor Name')
with pytest.raises(MqValueError):
mr.annual_risk('report_id', 'Factor Name')
replace.restore()
def test_normalized_performance():
idx = pd.date_range('2020-01-02', freq='D', periods=3)
replace = Replacer()
expected = {None: pd.Series(data=[1, 2, 3], index=idx,
name='normalizedPerformance', dtype='float64'),
"Long": pd.Series(data=[1, 2, 3], index=idx,
name='normalizedPerformance', dtype='float64')}
mock = replace('gs_quant.api.gs.portfolios.GsPortfolioApi.get_reports', Mock())
mock.return_value = [
Report.from_dict({'id': 'RP1', 'positionSourceType': 'Portfolio', 'positionSourceId': 'MP1',
'type': 'Portfolio Performance Analytics',
'parameters': {'transactionCostModel': 'FIXED'}})]
# mock PerformanceReport.get_portfolio_constituents()
mock = replace('gs_quant.markets.report.PerformanceReport.get_portfolio_constituents', Mock())
mock.return_value = MarketDataResponseFrame(data=constituents_data, dtype="float64")
# mock PerformanceReport.get()
mock = replace('gs_quant.markets.report.PerformanceReport.get', Mock())
mock.return_value = PerformanceReport(report_id='RP1',
position_source_type='Portfolio',
position_source_id='MP1',
report_type='Portfolio Performance Analytics',
parameters=ReportParameters(transaction_cost_model='FIXED'))
for k, v in expected.items():
with DataContext(datetime.date(2020, 1, 1), datetime.date(2019, 1, 3)):
actual = mr.normalized_performance('MP1', k)
assert all(actual.values == v.values)
replace.restore()
def test_normalized_performance_short():
idx = pd.date_range('2020-01-02', freq='D', periods=3)
replace = Replacer()
expected = {"Short": pd.Series(data=[1, 1 / 2, 1 / 3], index=idx,
name='normalizedPerformance', dtype='float64'),
"Long": pd.Series(data=[1, 2, 3], index=idx,
name='normalizedPerformance', dtype='float64'),
None: pd.Series(data=[1, (2 + 1 / 2) / 2, (3 + 1 / 3) / 2], index=idx,
name='normalizedPerformance', dtype='float64')}
mock = replace('gs_quant.api.gs.portfolios.GsPortfolioApi.get_reports', Mock())
mock.return_value = [
Report.from_dict({'id': 'RP1', 'positionSourceType': 'Portfolio', 'positionSourceId': 'MP1',
'type': 'Portfolio Performance Analytics',
'parameters': {'transactionCostModel': 'FIXED'}})]
# mock PerformanceReport.get_portfolio_constituents()
mock = replace('gs_quant.markets.report.PerformanceReport.get_portfolio_constituents', Mock())
mock.return_value = MarketDataResponseFrame(data=constituents_data_l_s, dtype="float64")
# mock PerformanceReport.get()
mock = replace('gs_quant.markets.report.PerformanceReport.get', Mock())
mock.return_value = PerformanceReport(report_id='RP1',
position_source_type='Portfolio',
position_source_id='MP1',
report_type='Portfolio Performance Analytics',
parameters=ReportParameters(transaction_cost_model='FIXED'))
for k, v in expected.items():
with DataContext(datetime.date(2020, 1, 1), datetime.date(2019, 1, 3)):
actual = mr.normalized_performance('MP1', k)
assert all((actual.values - v.values) < 0.01)
replace.restore()
def test_thematic_exposure():
replace = Replacer()
# mock getting PTA report
mock = replace('gs_quant.markets.report.ThematicReport.get', Mock())
mock.return_value = ThematicReport(id='report_id')
# mock getting thematic exposure
mock = replace('gs_quant.markets.report.ThematicReport.get_thematic_exposure', Mock())
mock.return_value =
|
pd.DataFrame(thematic_data)
|
pandas.DataFrame
|
# This file was used to pull all the water data from http://lakepowell.water-data.com/
# which was uploaded to Box. Don't use this to re-download all the data casually,
# because it has to make a large number of POST requests to the website.
import requests
from datetime import date
import pandas as pd
from bs4 import BeautifulSoup
def pull_data(request_data):
r = requests.post("http://lakepowell.water-data.com/index2.php", request_data)
soup = BeautifulSoup(r.text, features="lxml")
found_items = soup(text="Water Data for Selected Dates") #look for the title of the table
parent = list(found_items)[0].parent #go to object containing the title (and table)
table = parent.findNext('table') #find the next table in the page
rows = table.findAll("tr") #find all table rows
header = rows[0]
    data = rows[1:-1]  # all but first and last row (first is header, last is website-computed means)
text_headers = []
for column in header.findAll("th"):
text_headers.append(column.text)
text_data = []
for row in data:
text_row = []
for column in row.findAll("td"):
text_row.append(column.text)
text_data.append(text_row)
return text_headers, text_data
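# For reference, full_download() below posts batches of up to ten dates in the
# shape the site's form expects, roughly (illustrative values):
#     {"datemonth1": "January", "dateday1": "1", "dateyear1": 1963,
#      ...,
#      "datemonth10": "January", "dateday10": "10", "dateyear10": 1963,
#      "Get10DateData": "Get+Data"}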
def full_download():
request_data = {}
headers = []
all_data = [[None, None, None, None, None, None, None, None]]
request_data["Get10DateData"] = "Get+Data"
calendar = {"January" : list(range(1, 31+1)) , "February" : list(range(1, 29+1)),
"March" : list(range(1, 31+1)), "April" : list(range(1, 30+1)),
"May" : list(range(1, 31+1)), "June" : list(range(1, 30+1)),
"July": list(range(1, 31+1)), "August" : list(range(1, 31+1)),
"September" : list(range(1, 30+1)), "October" : list(range(1, 31+1)),
"November" : list(range(1, 30+1)), "December": list(range(1, 31+1))}
req_num = 0
cur_year = date.today().year
start_year = 1963
for year in range(start_year, int(cur_year) + 1):
# for year in range(1964, 1965):
print(year)
for month in calendar:
days = calendar[month]
for i in days:
req_num = req_num + 1
request_data["datemonth" + str(req_num)] = month
request_data["dateday" + str(req_num)] = str(i)
request_data["dateyear" + str(req_num)] = year
if req_num == 10:
req_num = 0
headers, data = pull_data(request_data)
all_data.extend(data)
request_data.clear()
request_data["Get10DateData"] = "Get+Data"
df = pd.DataFrame.from_records(all_data, columns=headers)
df.to_csv("lake_powell_conditions.csv")
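# Example usage (run deliberately -- this replays a few thousand POST requests
# against the live site and rewrites lake_powell_conditions.csv):
#     full_download()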
def update_water_data(water_data_path = "data/water_data"):
water_df =
|
pd.read_csv(water_data_path)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import string
import os
import json
import gc
from sklearn.preprocessing import LabelEncoder
gc.enable()
dataPath='../../../data/avito-demand-prediction'
## Load csv.zip data
train_active = pd.read_csv(os.path.join(dataPath, 'train_active.csv.zip'), compression='zip')
print('train_active.csv loaded')
test_active = pd.read_csv(os.path.join(dataPath, 'test_active.csv.zip'), compression='zip')
print('test_active.csv loaded')
## Apply label encoding
mapping_folder = 'Label_encoding_basic_active'
if not os.path.exists(mapping_folder):
os.mkdir(mapping_folder)
def compressMainTable(df):
for col in df:
if df[col].dtype=='object' and df[col].nunique() < 3000 and col != 'activation_date':
print(f'encoding {col}...')
le = LabelEncoder()
le.fit(df[col].astype(str))
le_mapping = dict(zip(le.classes_, map(int, le.transform(le.classes_))))
with open(os.path.join(mapping_folder, col+'.json'), 'w', encoding='utf-8') as f:
json.dump(le_mapping, f, indent=4, ensure_ascii=False)
df[col] = le.fit_transform(df[col].astype(str)).astype(np.int16)
df.price = df.price.fillna(-1).astype(np.int64)
df.activation_date = pd.to_datetime(df.activation_date)
return df
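# The per-column JSON mappings written above can later be used to map the integer
# codes back to the original strings, e.g. (illustrative; assumes a column named
# 'region' was among the encoded columns):
#     with open(os.path.join(mapping_folder, 'region.json'), encoding='utf-8') as f:
#         inverse = {code: label for label, code in json.load(f).items()}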
tmpCat = pd.concat([train_active,test_active], sort=False)
tmpCat = compressMainTable(tmpCat)
train = tmpCat.iloc[:train_active.shape[0],:]
test = tmpCat.iloc[train_active.shape[0]:, :]
## Store into hdf5 storage
# compression parameters
USE_HDF5_COMPRESSION_ARG = {
'format': 'table',
'complib': 'blosc:zstd',
'complevel': 9
}
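# These kwargs are passed straight to pd.HDFStore below: 'table' format keeps the
# stored frames appendable/queryable, and 'blosc:zstd' at complevel 9 favours
# smaller files over write speed.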
# Remove any existing hdf5 storage file since it does not support clean overwrite
for f in os.listdir(f'{dataPath}'):
if '.h5' in f and 'active' in f:
os.remove(f'{dataPath}/{f}')
print(f'{dataPath}/{f} removed')
# parameters for chunking
num_chunk_train = 15
num_chunk_test = 13
chunk_size = 1000000
# text Features storage
textFeatures = ['title', 'description']
# text features in train_active
flag = 0
for i in range(num_chunk_train):
with pd.HDFStore(f'{dataPath}/train_active_text_Data_{i}.h5', **USE_HDF5_COMPRESSION_ARG) as active_hdf:
active_hdf['Raw'] = train[['item_id', 'user_id'] + textFeatures][flag:flag+chunk_size]
flag += chunk_size
active_hdf.close()
# text features in test_active
flag = 0
for i in range(num_chunk_test):
with
|
pd.HDFStore(f'{dataPath}/test_active_text_Data_{i}.h5', **USE_HDF5_COMPRESSION_ARG)
|
pandas.HDFStore
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
                new_shape = fshape // arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
        # _get_numeric_data includes _get_bool_data, so can't test for
        # non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
        # numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
        for op in ['min', 'max', 'mean', 'var', 'std', 'prod', 'sum', 'cumsum',
                   'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
                   'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
        # compound dtypes should raise NotImplementedError
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
        # (currently there is a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
        self.assertTrue(len(o.sample(n=4)) == 4)
        self.assertTrue(len(o.sample(frac=0.34)) == 3)
        self.assertTrue(len(o.sample(frac=0.36)) == 4)
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
        # A few DataFrame tests with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
        # Weights have missing values to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with tm.assertRaises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_describe(self):
self.series.describe()
self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count': 7, 'unique': 4,
'top': 'a', 'freq': 3}, index=result.index)
assert_series_equal(result, expected)
dt = list(self.ts.index)
dt.append(dt[0])
ser = Series(dt)
rs = ser.describe()
min_date = min(dt)
max_date = max(dt)
xp = Series({'count': len(dt),
'unique': len(self.ts.index),
'first': min_date, 'last': max_date, 'freq': 2,
'top': min_date}, index=rs.index)
assert_series_equal(rs, xp)
def test_describe_empty(self):
result = pd.Series().describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
nanSeries = Series([np.nan])
nanSeries.name = 'NaN'
result = nanSeries.describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
def test_describe_none(self):
noneSeries = Series([None])
noneSeries.name = 'None'
expected = Series([0, 0], index=['count', 'unique'], name='None')
assert_series_equal(noneSeries.describe(), expected)
class TestDataFrame(tm.TestCase, Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
self.assertTrue(df.bool())
df = DataFrame([[False]])
self.assertFalse(df.bool())
df = DataFrame([[False, False]])
self.assertRaises(ValueError, lambda: df.bool())
self.assertRaises(ValueError, lambda: bool(df))
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.ix[2, 'A'] = 3
expected.ix[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.ix[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= '0.17.0':
expected.ix[5, 'A'] = 6.0
else:
expected.ix[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
def test_interp_leading_nans(self):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
tm._skip_if_no_scipy()
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7],
'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with tm.assertRaises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame({'a': [1., 2., 3., 4.],
'b': [np.nan, 2., 3., 4.],
'c': [3, 2, 2, 2]})
expected = result.interpolate(method='linear', axis=1, inplace=False)
result.interpolate(method='linear', axis=1, inplace=True)
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array(
[1, 2, 3, 4], dtype='float64'),
'B': np.array(
[1, 2, 3, 4], dtype='int64'),
'C': np.array(
[1., 2., 3, 4.], dtype='float64'),
'D': np.array(
[1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
def test_describe(self):
tm.makeDataFrame().describe()
tm.makeMixedDataFrame().describe()
tm.makeTimeDataFrame().describe()
def test_describe_percentiles_percent_or_raw(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
df = tm.makeDataFrame()
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[10, 50, 100])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[2])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[-2])
def test_describe_percentiles_equivalence(self):
df = tm.makeDataFrame()
d1 = df.describe()
d2 = df.describe(percentiles=[.25, .75])
assert_frame_equal(d1, d2)
def test_describe_percentiles_insert_median(self):
df = tm.makeDataFrame()
d1 = df.describe(percentiles=[.25, .75])
d2 = df.describe(percentiles=[.25, .5, .75])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('75%' in d2.index)
# none above
d1 = df.describe(percentiles=[.25, .45])
d2 = df.describe(percentiles=[.25, .45, .5])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('45%' in d2.index)
# none below
d1 = df.describe(percentiles=[.75, 1])
d2 = df.describe(percentiles=[.5, .75, 1])
assert_frame_equal(d1, d2)
self.assertTrue('75%' in d1.index)
self.assertTrue('100%' in d2.index)
# edge
d1 = df.describe(percentiles=[0, 1])
d2 = df.describe(percentiles=[0, .5, 1])
assert_frame_equal(d1, d2)
self.assertTrue('0%' in d1.index)
self.assertTrue('100%' in d2.index)
def test_describe_no_numeric(self):
df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8,
'B': ['a', 'b', 'c', 'd'] * 6})
desc = df.describe()
expected = DataFrame(dict((k, v.describe())
for k, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(desc, expected)
ts = tm.makeTimeSeries()
df = DataFrame({'time': ts.index})
desc = df.describe()
self.assertEqual(desc.time['first'], min(ts.index))
def test_describe_empty_int_columns(self):
df = DataFrame([[0, 1], [1, 2]])
desc = df[df[0] < 0].describe() # works
assert_series_equal(desc.xs('count'),
Series([0, 0], dtype=float, name='count'))
self.assertTrue(isnull(desc.ix[1:]).all().all())
def test_describe_objects(self):
df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']})
result = df.describe()
expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]},
index=['count', 'unique', 'top', 'freq'])
assert_frame_equal(result, expected)
df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')
})
df.loc[4] = pd.Timestamp('2010-01-04')
result = df.describe()
expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-04'), 2,
pd.Timestamp('2010-01-01'),
pd.Timestamp('2010-01-04')]},
index=['count', 'unique', 'top', 'freq',
'first', 'last'])
assert_frame_equal(result, expected)
# mix time and str
df['C2'] = ['a', 'a', 'b', 'c', 'a']
result = df.describe()
expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan]
assert_frame_equal(result, expected)
# just str
expected = DataFrame({'C2': [5, 3, 'a', 4]},
index=['count', 'unique', 'top', 'freq'])
result = df[['C2']].describe()
# mix of time, str, numeric
df['C3'] = [2, 4, 6, 8, 2]
result = df.describe()
expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
assert_frame_equal(result, expected)
assert_frame_equal(df.describe(), df[['C3']].describe())
assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe())
assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe())
def test_describe_typefiltering(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24, dtype='int64'),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
descN = df.describe()
expected_cols = ['numC', 'numD', ]
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descN, expected)
desc = df.describe(include=['number'])
assert_frame_equal(desc, descN)
desc = df.describe(exclude=['object', 'datetime'])
assert_frame_equal(desc, descN)
desc = df.describe(include=['float'])
assert_frame_equal(desc, descN.drop('numC', 1))
descC = df.describe(include=['O'])
expected_cols = ['catA', 'catB']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descC, expected)
descD = df.describe(include=['datetime'])
assert_series_equal(descD.ts, df.ts.describe())
desc = df.describe(include=['object', 'number', 'datetime'])
assert_frame_equal(desc.loc[:, ["numC", "numD"]].dropna(), descN)
assert_frame_equal(desc.loc[:, ["catA", "catB"]].dropna(), descC)
descDs = descD.sort_index()  # the index order changes for mixed dtypes
assert_frame_equal(desc.loc[:, "ts":].dropna().sort_index(), descDs)
desc = df.loc[:, 'catA':'catB'].describe(include='all')
assert_frame_equal(desc, descC)
desc = df.loc[:, 'numC':'numD'].describe(include='all')
assert_frame_equal(desc, descN)
desc = df.describe(percentiles=[], include='all')
cnt = Series(data=[4, 4, 6, 6, 6],
index=['catA', 'catB', 'numC', 'numD', 'ts'])
assert_series_equal(desc.count(), cnt)
self.assertTrue('count' in desc.index)
self.assertTrue('unique' in desc.index)
self.assertTrue('50%' in desc.index)
self.assertTrue('first' in desc.index)
desc = df.drop("ts", 1).describe(percentiles=[], include='all')
assert_series_equal(desc.count(), cnt.drop("ts"))
self.assertTrue('first' not in desc.index)
desc = df.drop(["numC", "numD"], 1).describe(percentiles=[],
include='all')
assert_series_equal(desc.count(), cnt.drop(["numC", "numD"]))
self.assertTrue('50%' not in desc.index)
def test_describe_typefiltering_category_bool(self):
df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8),
'B_str': ['a', 'b', 'c', 'd'] * 6,
'C_bool': [True] * 12 + [False] * 12,
'D_num': np.arange(24.) + .5,
'E_ts': tm.makeTimeSeries()[:24].index})
# bool is considered numeric in describe, although not an np.number
desc = df.describe()
expected_cols = ['C_bool', 'D_num']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(desc, expected)
desc = df.describe(include=["category"])
self.assertTrue(desc.columns.tolist() == ["A_cat"])
# 'all' includes numpy-dtypes + category
desc1 = df.describe(include="all")
desc2 = df.describe(include=[np.generic, "category"])
assert_frame_equal(desc1, desc2)
def test_describe_timedelta(self):
df = DataFrame({"td": pd.to_timedelta(np.arange(24) % 20, "D")})
self.assertTrue(df.describe().loc["mean"][0] == pd.to_timedelta(
"8d4h"))
def test_describe_typefiltering_dupcol(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
s = df.describe(include='all').shape[1]
df = pd.concat([df, df], axis=1)
s2 = df.describe(include='all').shape[1]
self.assertTrue(s2 == 2 * s)
def test_describe_typefiltering_groupby(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
G = df.groupby('catA')
self.assertTrue(G.describe(include=['number']).shape == (16, 2))
self.assertTrue(G.describe(include=['number', 'object']).shape == (22,
3))
self.assertTrue(G.describe(include='all').shape == (26, 4))
def test_describe_multi_index_df_column_names(self):
""" Test that column names persist after the describe operation."""
df = pd.DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# GH 11517
# test for hierarchical index
hierarchical_index_df = df.groupby(['A', 'B']).mean().T
self.assertTrue(hierarchical_index_df.columns.names == ['A', 'B'])
self.assertTrue(hierarchical_index_df.describe().columns.names ==
['A', 'B'])
# test for non-hierarchical index
non_hierarchical_index_df = df.groupby(['A']).mean().T
self.assertTrue(non_hierarchical_index_df.columns.names == ['A'])
self.assertTrue(non_hierarchical_index_df.describe().columns.names ==
['A'])
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial')
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_spline_extrapolate(self):
tm.skip_if_no_package(
'scipy', '0.15',
'setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
def test_spline_smooth(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
self.assertNotEqual(s.interpolate(method='spline', order=3, s=0)[5],
s.interpolate(method='spline', order=3)[5])
def test_spline_interpolation(self):
tm._skip_if_no_scipy()
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
# GH #10633
def test_spline_error(self):
tm._skip_if_no_scipy()
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
with tm.assertRaises(ValueError):
s.interpolate(method='spline', order=0)
def test_metadata_propagation_indiv(self):
# groupby
df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
result = df.groupby('A').sum()
self.check_metadata(df, result)
# resample
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20130101', periods=1000, freq='s'))
result = df.resample('1T')
self.check_metadata(df, result)
# merging with override
# GH 6923
_metadata = DataFrame._metadata
_finalize = DataFrame.__finalize__
np.random.seed(10)
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b'])
df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd'])
DataFrame._metadata = ['filename']
df1.filename = 'fname1.csv'
df2.filename = 'fname2.csv'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'merge':
left, right = other.left, other.right
value = getattr(left, name, '') + '|' + getattr(right,
name, '')
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, ''))
return self
DataFrame.__finalize__ = finalize
result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner')
self.assertEqual(result.filename, 'fname1.csv|fname2.csv')
# concat
# GH 6927
DataFrame._metadata = ['filename']
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab'))
df1.filename = 'foo'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
DataFrame.__finalize__ = finalize
result = pd.concat([df1, df1])
self.assertEqual(result.filename, 'foo+foo')
# reset
DataFrame._metadata = _metadata
DataFrame.__finalize__ = _finalize
def test_tz_convert_and_localize(self):
l0 = date_range('20140701', periods=5, freq='D')
# TODO: l1 should be a PeriodIndex for testing
# after GH2106 is addressed
with tm.assertRaises(NotImplementedError):
period_range('20140701', periods=1).tz_convert('UTC')
with tm.assertRaises(NotImplementedError):
period_range('20140701', periods=1).tz_localize('UTC')
# l1 = period_range('20140701', periods=5, freq='D')
l1 = date_range('20140701', periods=5, freq='D')
int_idx = Index(range(5))
for fn in ['tz_localize', 'tz_convert']:
if fn == 'tz_convert':
l0 = l0.tz_localize('UTC')
l1 = l1.tz_localize('UTC')
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)('US/Pacific')
l1_expected = getattr(idx, fn)('US/Pacific')
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)('US/Pacific')
self.assertTrue(df1.index.equals(l0_expected))
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)('US/Pacific', level=0)
self.assertFalse(df3.index.levels[0].equals(l0))
self.assertTrue(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1))
self.assertFalse(df3.index.levels[1].equals(l1_expected))
df3 = getattr(df2, fn)('US/Pacific', level=1)
self.assertTrue(df3.index.levels[0].equals(l0))
self.assertFalse(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1_expected))
self.assertFalse(df3.index.levels[1].equals(l1))
df4 = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
# TODO: untested
df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa
self.assertTrue(df3.index.levels[0].equals(l0))
self.assertFalse(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1_expected))
self.assertFalse(df3.index.levels[1].equals(l1))
# Bad Inputs
for fn in ['tz_localize', 'tz_convert']:
# Not DatetimeIndex / PeriodIndex
with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
df = DataFrame(index=int_idx)
df = getattr(df, fn)('US/Pacific')
# Not DatetimeIndex / PeriodIndex
with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
df = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
df = getattr(df, fn)('US/Pacific', level=0)
# Invalid level
with tm.assertRaisesRegexp(ValueError, 'not valid'):
df = DataFrame(index=l0)
df = getattr(df, fn)('US/Pacific', level=1)
def test_set_attribute(self):
# Test for consistent setattr behavior when an attribute and a column
# have the same name (Issue #8994)
df = DataFrame({'x': [1, 2, 3]})
df.y = 2
df['y'] = [2, 4, 6]
df.y = 5
assert_equal(df.y, 5)
assert_series_equal(df['y'], Series([2, 4, 6], name='y'))
def test_pct_change(self):
# GH 11150
pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(
0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
mask = pnl.isnull()
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
expected[mask] = np.nan
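# the test expects pct_change(fill_method='pad') to equal ffill()/ffill().shift() - 1, with originally-NaN entries masked back to NaN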
result = pnl.pct_change(axis=axis, fill_method='pad')
self.assert_frame_equal(result, expected)
class TestPanel(tm.TestCase, Generic):
_typ = Panel
_comparator = lambda self, x, y: assert_panel_equal(x, y)
class TestNDFrame(tm.TestCase):
# tests that don't fit elsewhere
def test_squeeze(self):
# noop
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries()]:
tm.assert_series_equal(s.squeeze(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.squeeze(), df)
for p in [tm.makePanel()]:
tm.assert_panel_equal(p.squeeze(), p)
for p4d in [tm.makePanel4D()]:
tm.assert_panel4d_equal(p4d.squeeze(), p4d)
# squeezing
df = tm.makeTimeDataFrame().reindex(columns=['A'])
tm.assert_series_equal(df.squeeze(), df['A'])
p = tm.makePanel().reindex(items=['ItemA'])
tm.assert_frame_equal(p.squeeze(), p['ItemA'])
p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A'])
tm.assert_series_equal(p.squeeze(), p.ix['ItemA', :, 'A'])
p4d = tm.makePanel4D().reindex(labels=['label1'])
tm.assert_panel_equal(p4d.squeeze(), p4d['label1'])
p4d = tm.makePanel4D().reindex(labels=['label1'], items=['ItemA'])
tm.assert_frame_equal(p4d.squeeze(), p4d.ix['label1', 'ItemA'])
# don't fail with 0 length dimensions GH11229 & GH8999
empty_series = pd.Series([], name='five')
empty_frame = pd.DataFrame([empty_series])
empty_panel = pd.Panel({'six': empty_frame})
[tm.assert_series_equal(empty_series, higher_dim.squeeze())
for higher_dim in [empty_series, empty_frame, empty_panel]]
def test_equals(self):
s1 = pd.Series([1, 2, 3], index=[0, 2, 1])
s2 = s1.copy()
self.assertTrue(s1.equals(s2))
s1[1] = 99
self.assertFalse(s1.equals(s2))
# NaNs compare as equal
s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3])
s2 = s1.copy()
self.assertTrue(s1.equals(s2))
s2[0] = 9.9
self.assertFalse(s1.equals(s2))
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s1 = Series([1, 2, np.nan], index=idx)
s2 = s1.copy()
self.assertTrue(s1.equals(s2))
# Add object dtype column with nans
index = np.random.random(10)
df1 = DataFrame(
np.random.random(10, ), index=index, columns=['floats'])
df1['text'] = 'the sky is so blue. we could use more chocolate.'.split(
)
df1['start'] = date_range('2000-1-1', periods=10, freq='T')
df1['end'] = date_range('2000-1-1', periods=10, freq='D')
df1['diff'] = df1['end'] - df1['start']
df1['bool'] = (np.arange(10) % 3 == 0)
df1.ix[::2] = nan
df2 = df1.copy()
self.assertTrue(df1['text'].equals(df2['text']))
self.assertTrue(df1['start'].equals(df2['start']))
self.assertTrue(df1['end'].equals(df2['end']))
self.assertTrue(df1['diff'].equals(df2['diff']))
self.assertTrue(df1['bool'].equals(df2['bool']))
self.assertTrue(df1.equals(df2))
self.assertFalse(df1.equals(object))
# different dtype
different = df1.copy()
different['floats'] = different['floats'].astype('float32')
self.assertFalse(df1.equals(different))
# different index
different_index = -index
different = df2.set_index(different_index)
self.assertFalse(df1.equals(different))
# different columns
different = df2.copy()
different.columns = df2.columns[::-1]
self.assertFalse(df1.equals(different))
# DatetimeIndex
index = pd.date_range('2000-1-1', periods=10, freq='T')
df1 = df1.set_index(index)
df2 = df1.copy()
self.assertTrue(df1.equals(df2))
# MultiIndex
df3 = df1.set_index(['text'], append=True)
df2 = df1.set_index(['text'], append=True)
self.assertTrue(df3.equals(df2))
df2 = df1.set_index(['floats'], append=True)
self.assertFalse(df3.equals(df2))
# NaN in index
df3 = df1.set_index(['floats'], append=True)
df2 = df1.set_index(['floats'], append=True)
self.assertTrue(df3.equals(df2))
# GH 8437
a = pd.Series([False, np.nan])
b = pd.Series([False, np.nan])
c = pd.Series(index=range(2))
d = pd.Series(index=range(2))
e = pd.Series(index=range(2))
f = pd.Series(index=range(2))
c[:-1] = d[:-1] = e[0] = f[0] = False
self.assertTrue(a.equals(a))
self.assertTrue(a.equals(b))
self.assertTrue(a.equals(c))
self.assertTrue(a.equals(d))
self.assertFalse(a.equals(e))
self.assertTrue(e.equals(f))
def test_describe_raises(self):
with tm.assertRaises(NotImplementedError):
tm.makePanel().describe()
def test_pipe(self):
df = DataFrame({'A': [1, 2, 3]})
f = lambda x, y: x ** y
result = df.pipe(f, 2)
expected =
|
DataFrame({'A': [1, 4, 9]})
|
pandas.DataFrame
|
from os.path import abspath, dirname, join, isfile, normpath, relpath
from pandas.testing import assert_frame_equal
from numpy.testing import assert_allclose
from scipy.interpolate import interp1d
import matplotlib.pylab as plt
from datetime import datetime
import mhkit.wave as wave
from io import StringIO
import pandas as pd
import numpy as np
import contextlib
import unittest
import netCDF4
import inspect
import pickle
import json
import sys
import os
import time
from random import seed, randint
testdir = dirname(abspath(__file__))
datadir = normpath(join(testdir,relpath('../../examples/data/wave')))
class TestResourceSpectrum(unittest.TestCase):
@classmethod
def setUpClass(self):
omega = np.arange(0.1,3.5,0.01)
self.f = omega/(2*np.pi)
self.Hs = 2.5
self.Tp = 8
df = self.f[1] - self.f[0]
Trep = 1/df
self.t = np.arange(0, Trep, 0.05)
@classmethod
def tearDownClass(self):
pass
def test_pierson_moskowitz_spectrum(self):
S = wave.resource.pierson_moskowitz_spectrum(self.f,self.Tp)
Tp0 = wave.resource.peak_period(S).iloc[0,0]
error = np.abs(self.Tp - Tp0)/self.Tp
self.assertLess(error, 0.01)
def test_bretschneider_spectrum(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
Hm0 = wave.resource.significant_wave_height(S).iloc[0,0]
Tp0 = wave.resource.peak_period(S).iloc[0,0]
errorHm0 = np.abs(self.Tp - Tp0)/self.Tp
errorTp0 = np.abs(self.Hs - Hm0)/self.Hs
self.assertLess(errorHm0, 0.01)
self.assertLess(errorTp0, 0.01)
def test_surface_elevation_seed(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
sig = inspect.signature(wave.resource.surface_elevation)
seednum = sig.parameters['seed'].default
eta0 = wave.resource.surface_elevation(S, self.t)
eta1 = wave.resource.surface_elevation(S, self.t, seed=seednum)
assert_frame_equal(eta0, eta1)
def test_surface_elevation_phasing(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
eta0 = wave.resource.surface_elevation(S, self.t)
sig = inspect.signature(wave.resource.surface_elevation)
seednum = sig.parameters['seed'].default
np.random.seed(seednum)
phases = np.random.rand(len(S)) * 2 * np.pi
eta1 = wave.resource.surface_elevation(S, self.t, phases=phases)
assert_frame_equal(eta0, eta1)
def test_surface_elevation_phases_np_and_pd(self):
S0 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
S1 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs*1.1)
S = pd.concat([S0, S1], axis=1)
phases_np = np.random.rand(S.shape[0], S.shape[1]) * 2 * np.pi
phases_pd = pd.DataFrame(phases_np, index=S.index, columns=S.columns)
eta_np = wave.resource.surface_elevation(S, self.t, phases=phases_np)
eta_pd = wave.resource.surface_elevation(S, self.t, phases=phases_pd)
assert_frame_equal(eta_np, eta_pd)
def test_surface_elevation_frequency_bins_np_and_pd(self):
S0 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
S1 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs*1.1)
S = pd.concat([S0, S1], axis=1)
eta0 = wave.resource.surface_elevation(S, self.t)
f_bins_np = np.array([np.diff(S.index)[0]]*len(S))
f_bins_pd = pd.DataFrame(f_bins_np, index=S.index, columns=['df'])
eta_np = wave.resource.surface_elevation(S, self.t, frequency_bins=f_bins_np)
eta_pd = wave.resource.surface_elevation(S, self.t, frequency_bins=f_bins_pd)
assert_frame_equal(eta0, eta_np)
assert_frame_equal(eta_np, eta_pd)
def test_surface_elevation_moments(self):
S = wave.resource.jonswap_spectrum(self.f, self.Tp, self.Hs)
eta = wave.resource.surface_elevation(S, self.t)
dt = self.t[1] - self.t[0]
Sn = wave.resource.elevation_spectrum(eta, 1/dt, len(eta.values),
detrend=False, window='boxcar',
noverlap=0)
m0 = wave.resource.frequency_moment(S,0).m0.values[0]
m0n = wave.resource.frequency_moment(Sn,0).m0.values[0]
errorm0 = np.abs((m0 - m0n)/m0)
self.assertLess(errorm0, 0.01)
m1 = wave.resource.frequency_moment(S,1).m1.values[0]
m1n = wave.resource.frequency_moment(Sn,1).m1.values[0]
errorm1 = np.abs((m1 - m1n)/m1)
self.assertLess(errorm1, 0.01)
def test_surface_elevation_rmse(self):
S = wave.resource.jonswap_spectrum(self.f, self.Tp, self.Hs)
eta = wave.resource.surface_elevation(S, self.t)
dt = self.t[1] - self.t[0]
Sn = wave.resource.elevation_spectrum(eta, 1/dt, len(eta),
detrend=False, window='boxcar',
noverlap=0)
fSn = interp1d(Sn.index.values, Sn.values, axis=0)
rmse = (S.values - fSn(S.index.values))**2
rmse_sum = (np.sum(rmse)/len(rmse))**0.5
self.assertLess(rmse_sum, 0.02)
def test_jonswap_spectrum(self):
S = wave.resource.jonswap_spectrum(self.f, self.Tp, self.Hs)
Hm0 = wave.resource.significant_wave_height(S).iloc[0,0]
Tp0 = wave.resource.peak_period(S).iloc[0,0]
errorHm0 = np.abs(self.Tp - Tp0)/self.Tp
errorTp0 = np.abs(self.Hs - Hm0)/self.Hs
self.assertLess(errorHm0, 0.01)
self.assertLess(errorTp0, 0.01)
def test_plot_spectrum(self):
filename = abspath(join(testdir, 'wave_plot_spectrum.png'))
if isfile(filename):
os.remove(filename)
S = wave.resource.pierson_moskowitz_spectrum(self.f,self.Tp)
plt.figure()
wave.graphics.plot_spectrum(S)
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
def test_plot_chakrabarti(self):
filename = abspath(join(testdir, 'wave_plot_chakrabarti.png'))
if isfile(filename):
os.remove(filename)
D = 5
H = 10
lambda_w = 200
wave.graphics.plot_chakrabarti(H, lambda_w, D)
plt.savefig(filename)
def test_plot_chakrabarti_np(self):
filename = abspath(join(testdir, 'wave_plot_chakrabarti_np.png'))
if isfile(filename):
os.remove(filename)
D = np.linspace(5, 15, 5)
H = 10 * np.ones_like(D)
lambda_w = 200 * np.ones_like(D)
wave.graphics.plot_chakrabarti(H, lambda_w, D)
plt.savefig(filename)
self.assertTrue(isfile(filename))
def test_plot_chakrabarti_pd(self):
filename = abspath(join(testdir, 'wave_plot_chakrabarti_pd.png'))
if isfile(filename):
os.remove(filename)
D = np.linspace(5, 15, 5)
H = 10 * np.ones_like(D)
lambda_w = 200 * np.ones_like(D)
df = pd.DataFrame([H.flatten(),lambda_w.flatten(),D.flatten()],
index=['H','lambda_w','D']).transpose()
wave.graphics.plot_chakrabarti(df.H, df.lambda_w, df.D)
plt.savefig(filename)
self.assertTrue(isfile(filename))
class TestResourceMetrics(unittest.TestCase):
@classmethod
def setUpClass(self):
omega = np.arange(0.1,3.5,0.01)
self.f = omega/(2*np.pi)
self.Hs = 2.5
self.Tp = 8
file_name = join(datadir, 'ValData1.json')
with open(file_name, "r") as read_file:
self.valdata1 = pd.DataFrame(json.load(read_file))
self.valdata2 = {}
file_name = join(datadir, 'ValData2_MC.json')
with open(file_name, "r") as read_file:
data = json.load(read_file)
self.valdata2['MC'] = data
for i in data.keys():
# Calculate elevation spectra
elevation = pd.DataFrame(data[i]['elevation'])
elevation.index = elevation.index.astype(float)
elevation.sort_index(inplace=True)
sample_rate = data[i]['sample_rate']
NFFT = data[i]['NFFT']
self.valdata2['MC'][i]['S'] = wave.resource.elevation_spectrum(elevation,
sample_rate, NFFT)
file_name = join(datadir, 'ValData2_AH.json')
with open(file_name, "r") as read_file:
data = json.load(read_file)
self.valdata2['AH'] = data
for i in data.keys():
# Calculate elevation spectra
elevation = pd.DataFrame(data[i]['elevation'])
elevation.index = elevation.index.astype(float)
elevation.sort_index(inplace=True)
sample_rate = data[i]['sample_rate']
NFFT = data[i]['NFFT']
self.valdata2['AH'][i]['S'] = wave.resource.elevation_spectrum(elevation,
sample_rate, NFFT)
file_name = join(datadir, 'ValData2_CDiP.json')
with open(file_name, "r") as read_file:
data = json.load(read_file)
self.valdata2['CDiP'] = data
for i in data.keys():
temp = pd.Series(data[i]['S']).to_frame('S')
temp.index = temp.index.astype(float)
self.valdata2['CDiP'][i]['S'] = temp
@classmethod
def tearDownClass(self):
pass
def test_kfromw(self):
for i in self.valdata1.columns:
f = np.array(self.valdata1[i]['w'])/(2*np.pi)
h = self.valdata1[i]['h']
rho = self.valdata1[i]['rho']
expected = self.valdata1[i]['k']
k = wave.resource.wave_number(f, h, rho)
calculated = k.loc[:,'k'].values
error = ((expected-calculated)**2).sum() # SSE
self.assertLess(error, 1e-6)
def test_kfromw_one_freq(self):
g = 9.81
f = 0.1
h = 1e9
w = np.pi*2*f # deep water dispersion
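# deep-water dispersion relation: omega**2 = g*k, so the expected wave number is omega**2/g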
expected = w**2 / g
calculated = wave.resource.wave_number(f=f, h=h, g=g).values[0][0]
error = np.abs(expected-calculated)
self.assertLess(error, 1e-6)
def test_wave_length(self):
k_list=[1,2,10,3]
l_expected = (2.*np.pi/np.array(k_list)).tolist()
k_df = pd.DataFrame(k_list,index = [1,2,3,4])
k_series= k_df[0]
k_array=np.array(k_list)
for l in [k_list, k_df, k_series, k_array]:
l_calculated = wave.resource.wave_length(l)
self.assertListEqual(l_expected,l_calculated.tolist())
idx=0
k_int = k_list[idx]
l_calculated = wave.resource.wave_length(k_int)
self.assertEqual(l_expected[idx],l_calculated)
def test_depth_regime(self):
expected = [True,True,False,True]
l_list=[1,2,10,3]
l_df = pd.DataFrame(l_list,index = [1,2,3,4])
l_series= l_df[0]
l_array=np.array(l_list)
h = 10
for l in [l_list, l_df, l_series, l_array]:
calculated = wave.resource.depth_regime(l,h)
self.assertListEqual(expected,calculated.tolist())
idx=0
l_int = l_list[idx]
calculated = wave.resource.depth_regime(l_int,h)
self.assertEqual(expected[idx],calculated)
def test_wave_celerity(self):
# Depth regime ratio
dr_ratio=2
# a small change in f gives similar cg values
f=np.linspace(20.0001,20.0005,5)
# Choose the index to spike at; the cg spike is inversely proportional to k
k_idx=2
k_tmp=[1, 1, 0.5, 1, 1]
k = pd.DataFrame(k_tmp, index=f)
# all shallow
cg_shallow1 = wave.resource.wave_celerity(k, h=0.0001,depth_check=True)
cg_shallow2 = wave.resource.wave_celerity(k, h=0.0001,depth_check=False)
self.assertTrue(all(cg_shallow1.squeeze().values ==
cg_shallow2.squeeze().values))
# all deep
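# in deep water the group velocity is cg = omega/(2*k) = pi*f/k, which the assertion below checks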
cg = wave.resource.wave_celerity(k, h=1000,depth_check=True)
self.assertTrue(all(np.pi*f/k.squeeze().values == cg.squeeze().values))
def test_energy_flux_deep(self):
# Dependent on mhkit.resource.BS spectrum
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
Te = wave.resource.energy_period(S)
Hm0 = wave.resource.significant_wave_height(S)
rho=1025
g=9.80665
coeff = rho*(g**2)/(64*np.pi)
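# deep-water energy flux: J = rho*g**2/(64*pi) * Hm0**2 * Te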
J = coeff*(Hm0.squeeze()**2)*Te.squeeze()
h=-1 # not used when deep=True
J_calc = wave.resource.energy_flux(S, h, deep=True)
self.assertTrue(J_calc.squeeze() == J)
def test_moments(self):
for file_i in self.valdata2.keys(): # for each file MC, AH, CDiP
datasets = self.valdata2[file_i]
for s in datasets.keys(): # for each set
data = datasets[s]
for m in data['m'].keys():
expected = data['m'][m]
S = data['S']
if s == 'CDiP1' or s == 'CDiP6':
f_bins=pd.Series(data['freqBinWidth'])
else:
f_bins = None
calculated = wave.resource.frequency_moment(S, int(m)
,frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
self.assertLess(error, 0.01)
def test_metrics(self):
for file_i in self.valdata2.keys(): # for each file MC, AH, CDiP
datasets = self.valdata2[file_i]
for s in datasets.keys(): # for each set
data = datasets[s]
S = data['S']
if file_i == 'CDiP':
f_bins=pd.Series(data['freqBinWidth'])
else:
f_bins = None
# Hm0
expected = data['metrics']['Hm0']
calculated = wave.resource.significant_wave_height(S,
frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
#print('Hm0', expected, calculated, error)
self.assertLess(error, 0.01)
# Te
expected = data['metrics']['Te']
calculated = wave.resource.energy_period(S,
frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
#print('Te', expected, calculated, error)
self.assertLess(error, 0.01)
# T0
expected = data['metrics']['T0']
calculated = wave.resource.average_zero_crossing_period(S,
frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
#print('T0', expected, calculated, error)
self.assertLess(error, 0.01)
# Tc
expected = data['metrics']['Tc']
calculated = wave.resource.average_crest_period(S,
# Tc = Tavg**2
frequency_bins=f_bins).iloc[0,0]**2
error = np.abs(expected-calculated)/expected
#print('Tc', expected, calculated, error)
self.assertLess(error, 0.01)
# Tm
expected = np.sqrt(data['metrics']['Tm'])
calculated = wave.resource.average_wave_period(S,
frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
#print('Tm', expected, calculated, error)
self.assertLess(error, 0.01)
# Tp
expected = data['metrics']['Tp']
calculated = wave.resource.peak_period(S).iloc[0,0]
error = np.abs(expected-calculated)/expected
#print('Tp', expected, calculated, error)
self.assertLess(error, 0.001)
# e
expected = data['metrics']['e']
calculated = wave.resource.spectral_bandwidth(S,
frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
#print('e', expected, calculated, error)
self.assertLess(error, 0.001)
# J
if file_i != 'CDiP':
for i,j in zip(data['h'],data['J']):
expected = data['J'][j]
calculated = wave.resource.energy_flux(S,i)
error = np.abs(expected-calculated.values)/expected
self.assertLess(error, 0.1)
# v
if file_i == 'CDiP':
# this should be updated to run on other datasets
expected = data['metrics']['v']
calculated = wave.resource.spectral_width(S,
frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
self.assertLess(error, 0.01)
if file_i == 'MC':
expected = data['metrics']['v']
# testing that default uniform frequency bin widths works
calculated = wave.resource.spectral_width(S).iloc[0,0]
error = np.abs(expected-calculated)/expected
self.assertLess(error, 0.01)
def test_plot_elevation_timeseries(self):
filename = abspath(join(testdir, 'wave_plot_elevation_timeseries.png'))
if isfile(filename):
os.remove(filename)
data = self.valdata2['MC']
temp = pd.DataFrame(data[list(data.keys())[0]]['elevation'])
temp.index = temp.index.astype(float)
temp.sort_index(inplace=True)
eta = temp.iloc[0:100,:]
plt.figure()
wave.graphics.plot_elevation_timeseries(eta)
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
class TestResourceContours(unittest.TestCase):
@classmethod
def setUpClass(self):
f_name= 'Hm0_Te_46022.json'
self.Hm0Te = pd.read_json(join(datadir,f_name))
with open(join(datadir, 'principal_component_analysis.pkl'), 'rb') as f:
self.pca = pickle.load(f)
@classmethod
def tearDownClass(self):
pass
def test_environmental_contour(self):
Hm0Te = self.Hm0Te
df = Hm0Te[Hm0Te['Hm0'] < 20]
Hm0 = df.Hm0.values
Te = df.Te.values
dt_ss = (Hm0Te.index[2]-Hm0Te.index[1]).seconds
time_R = 100
Hm0_contour, Te_contour = wave.resource.environmental_contour(Hm0, Te,
dt_ss, time_R)
expected_contours = pd.read_csv(join(datadir,'Hm0_Te_contours_46022.csv'))
assert_allclose(expected_contours.Hm0_contour.values, Hm0_contour, rtol=1e-3)
def test__principal_component_analysis(self):
Hm0Te = self.Hm0Te
df = Hm0Te[Hm0Te['Hm0'] < 20]
Hm0 = df.Hm0.values
Te = df.Te.values
PCA = wave.resource._principal_component_analysis(Hm0,Te, bin_size=250)
assert_allclose(PCA['principal_axes'], self.pca['principal_axes'])
self.assertAlmostEqual(PCA['shift'], self.pca['shift'])
self.assertAlmostEqual(PCA['x1_fit']['mu'], self.pca['x1_fit']['mu'])
self.assertAlmostEqual(PCA['mu_fit'].slope, self.pca['mu_fit'].slope)
self.assertAlmostEqual(PCA['mu_fit'].intercept, self.pca['mu_fit'].intercept)
assert_allclose(PCA['sigma_fit']['x'], self.pca['sigma_fit']['x'])
def test_plot_environmental_contour(self):
filename = abspath(join(testdir, 'wave_plot_environmental_contour.png'))
if isfile(filename):
os.remove(filename)
Hm0Te = self.Hm0Te
df = Hm0Te[Hm0Te['Hm0'] < 20]
Hm0 = df.Hm0.values
Te = df.Te.values
dt_ss = (Hm0Te.index[2]-Hm0Te.index[1]).seconds
time_R = 100
Hm0_contour, Te_contour = wave.resource.environmental_contour(Hm0, Te,
dt_ss, time_R)
plt.figure()
wave.graphics.plot_environmental_contour(Te, Hm0,
Te_contour, Hm0_contour,
data_label='NDBC 46022',
contour_label='100-year Contour',
x_label = 'Te [s]',
y_label = 'Hm0 [m]')
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
def test_plot_environmental_contour_multiyear(self):
filename = abspath(join(testdir,
'wave_plot_environmental_contour_multiyear.png'))
if isfile(filename):
os.remove(filename)
Hm0Te = self.Hm0Te
df = Hm0Te[Hm0Te['Hm0'] < 20]
Hm0 = df.Hm0.values
Te = df.Te.values
dt_ss = (Hm0Te.index[2]-Hm0Te.index[1]).seconds
time_R = np.array([100, 105, 110, 120, 150])
Hm0_contour, Te_contour = wave.resource.environmental_contour(Hm0, Te,
dt_ss, time_R)
contour_label = [f'{year}-year Contour' for year in time_R]
plt.figure()
wave.graphics.plot_environmental_contour(Te, Hm0,
Te_contour, Hm0_contour,
data_label='NDBC 46022',
contour_label=contour_label,
x_label = 'Te [s]',
y_label = 'Hm0 [m]')
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
class TestPerformance(unittest.TestCase):
@classmethod
def setUpClass(self):
np.random.seed(123)
Hm0 = np.random.rayleigh(4, 100000)
Te = np.random.normal(4.5, .8, 100000)
P = np.random.normal(200, 40, 100000)
J = np.random.normal(300, 10, 100000)
self.data = pd.DataFrame({'Hm0': Hm0, 'Te': Te, 'P': P,'J': J})
self.Hm0_bins = np.arange(0,19,0.5)
self.Te_bins = np.arange(0,9,1)
@classmethod
def tearDownClass(self):
pass
def test_capture_length(self):
L = wave.performance.capture_length(self.data['P'], self.data['J'])
L_stats = wave.performance.statistics(L)
self.assertAlmostEqual(L_stats['mean'], 0.6676, 3)
def test_capture_length_matrix(self):
L = wave.performance.capture_length(self.data['P'], self.data['J'])
LM = wave.performance.capture_length_matrix(self.data['Hm0'], self.data['Te'],
L, 'std', self.Hm0_bins, self.Te_bins)
self.assertEqual(LM.shape, (38,9))
self.assertEqual(LM.isna().sum().sum(), 131)
def test_wave_energy_flux_matrix(self):
JM = wave.performance.wave_energy_flux_matrix(self.data['Hm0'], self.data['Te'],
self.data['J'], 'mean', self.Hm0_bins, self.Te_bins)
self.assertEqual(JM.shape, (38,9))
self.assertEqual(JM.isna().sum().sum(), 131)
def test_power_matrix(self):
L = wave.performance.capture_length(self.data['P'], self.data['J'])
LM = wave.performance.capture_length_matrix(self.data['Hm0'], self.data['Te'],
L, 'mean', self.Hm0_bins, self.Te_bins)
JM = wave.performance.wave_energy_flux_matrix(self.data['Hm0'], self.data['Te'],
self.data['J'], 'mean', self.Hm0_bins, self.Te_bins)
PM = wave.performance.power_matrix(LM, JM)
self.assertEqual(PM.shape, (38,9))
self.assertEqual(PM.isna().sum().sum(), 131)
def test_mean_annual_energy_production(self):
L = wave.performance.capture_length(self.data['P'], self.data['J'])
maep = wave.performance.mean_annual_energy_production_timeseries(L, self.data['J'])
self.assertAlmostEqual(maep, 1754020.077, 2)
def test_plot_matrix(self):
filename = abspath(join(testdir, 'wave_plot_matrix.png'))
if isfile(filename):
os.remove(filename)
M = wave.performance.wave_energy_flux_matrix(self.data['Hm0'], self.data['Te'],
self.data['J'], 'mean', self.Hm0_bins, self.Te_bins)
plt.figure()
wave.graphics.plot_matrix(M)
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
class TestIOndbc(unittest.TestCase):
@classmethod
def setUpClass(self):
self.expected_columns_metRT = ['WDIR', 'WSPD', 'GST', 'WVHT', 'DPD',
'APD', 'MWD', 'PRES', 'ATMP', 'WTMP', 'DEWP', 'VIS', 'PTDY', 'TIDE']
self.expected_units_metRT = {'WDIR': 'degT', 'WSPD': 'm/s', 'GST': 'm/s',
'WVHT': 'm', 'DPD': 'sec', 'APD': 'sec', 'MWD': 'degT', 'PRES': 'hPa',
'ATMP': 'degC', 'WTMP': 'degC', 'DEWP': 'degC', 'VIS': 'nmi',
'PTDY': 'hPa', 'TIDE': 'ft'}
self.expected_columns_metH = ['WDIR', 'WSPD', 'GST', 'WVHT', 'DPD',
'APD', 'MWD', 'PRES', 'ATMP', 'WTMP', 'DEWP', 'VIS', 'TIDE']
self.expected_units_metH = {'WDIR': 'degT', 'WSPD': 'm/s', 'GST': 'm/s',
'WVHT': 'm', 'DPD': 'sec', 'APD': 'sec', 'MWD': 'deg', 'PRES': 'hPa',
'ATMP': 'degC', 'WTMP': 'degC', 'DEWP': 'degC', 'VIS': 'nmi',
'TIDE': 'ft'}
self.filenames=['46042w1996.txt.gz',
'46029w1997.txt.gz',
'46029w1998.txt.gz']
self.swden = pd.read_csv(join(datadir,self.filenames[0]), sep=r'\s+',
compression='gzip')
@classmethod
def tearDownClass(self):
pass
### Realtime data
def test_ndbc_read_realtime_met(self):
data, units = wave.io.ndbc.read_file(join(datadir, '46097.txt'))
expected_index0 = datetime(2019,4,2,13,50)
self.assertSetEqual(set(data.columns), set(self.expected_columns_metRT))
self.assertEqual(data.index[0], expected_index0)
self.assertEqual(data.shape, (6490, 14))
self.assertEqual(units,self.expected_units_metRT)
### Historical data
def test_ndbnc_read_historical_met(self):
# QC'd monthly data, Aug 2019
data, units = wave.io.ndbc.read_file(join(datadir, '46097h201908qc.txt'))
expected_index0 = datetime(2019,8,1,0,0)
self.assertSetEqual(set(data.columns), set(self.expected_columns_metH))
self.assertEqual(data.index[0], expected_index0)
self.assertEqual(data.shape, (4464, 13))
self.assertEqual(units,self.expected_units_metH)
### Spectral data
def test_ndbc_read_spectral(self):
data, units = wave.io.ndbc.read_file(join(datadir, 'data.txt'))
self.assertEqual(data.shape, (743, 47))
self.assertEqual(units, None)
def test_ndbc_available_data(self):
data=wave.io.ndbc.available_data('swden', buoy_number='46029')
cols = data.columns.tolist()
exp_cols = ['id', 'year', 'filename']
self.assertEqual(cols, exp_cols)
years = [int(year) for year in data.year.tolist()]
exp_years=[*range(1996,1996+len(years))]
self.assertEqual(years, exp_years)
self.assertEqual(data.shape, (len(data), 3))
def test__ndbc_parse_filenames(self):
filenames= pd.Series(self.filenames)
buoys = wave.io.ndbc._parse_filenames('swden', filenames)
years = buoys.year.tolist()
numbers = buoys.id.tolist()
fnames = buoys.filename.tolist()
self.assertEqual(buoys.shape, (len(filenames),3))
self.assertListEqual(years, ['1996','1997','1998'])
self.assertListEqual(numbers, ['46042','46029','46029'])
self.assertListEqual(fnames, self.filenames)
def test_ndbc_request_data(self):
filenames= pd.Series(self.filenames[0])
ndbc_data = wave.io.ndbc.request_data('swden', filenames)
self.assertTrue(self.swden.equals(ndbc_data['1996']))
def test_ndbc_request_data_from_dataframe(self):
filenames= pd.DataFrame(pd.Series(data=self.filenames[0]))
ndbc_data = wave.io.ndbc.request_data('swden', filenames)
assert_frame_equal(self.swden, ndbc_data['1996'])
def test_ndbc_request_data_filenames_length(self):
with self.assertRaises(AssertionError):
wave.io.ndbc.request_data('swden', pd.Series(dtype=float))
def test_ndbc_to_datetime_index(self):
dt = wave.io.ndbc.to_datetime_index('swden', self.swden)
self.assertEqual(type(dt.index), pd.DatetimeIndex)
self.assertFalse({'YY','MM','DD','hh'}.issubset(dt.columns))
def test_ndbc_request_data_empty_file(self):
temp_stdout = StringIO()
# known empty file. If NDBC replaces it, this test may fail.
filename = "42008h1984.txt.gz"
buoy_id='42008'
year = '1984'
with contextlib.redirect_stdout(temp_stdout):
wave.io.ndbc.request_data('stdmet', pd.Series(filename))
output = temp_stdout.getvalue().strip()
msg = (f'The NDBC buoy {buoy_id} for year {year} with '
f'filename {filename} is empty or missing '
'data. Please omit this file from your data '
'request in the future.')
self.assertEqual(output, msg)
def test_ndbc_request_multiple_files_with_empty_file(self):
temp_stdout = StringIO()
# known empty file. If NDBC replaces it, this test may fail.
empty_file = '42008h1984.txt.gz'
working_file = '46042h1996.txt.gz'
filenames = pd.Series([empty_file, working_file])
with contextlib.redirect_stdout(temp_stdout):
ndbc_data =wave.io.ndbc.request_data('stdmet', filenames)
self.assertEqual(1, len(ndbc_data))
def test_ndbc_dates_to_datetime(self):
dt = wave.io.ndbc.dates_to_datetime('swden', self.swden)
self.assertEqual(datetime(1996, 1, 1, 1, 0), dt[1])
def test_date_string_to_datetime(self):
swden = self.swden.copy(deep=True)
swden['mm'] = np.zeros(len(swden)).astype(int).astype(str)
year_string='YY'
year_fmt='%y'
parse_columns = [year_string, 'MM', 'DD', 'hh', 'mm']
df = wave.io.ndbc._date_string_to_datetime(swden, parse_columns,
year_fmt)
dt = df['date']
self.assertEqual(datetime(1996, 1, 1, 1, 0), dt[1])
def test_parameter_units(self):
parameter='swden'
units = wave.io.ndbc.parameter_units(parameter)
self.assertEqual(units[parameter], '(m*m)/Hz')
class TestWECSim(unittest.TestCase):
@classmethod
def setUpClass(self):
pass
@classmethod
def tearDownClass(self):
pass
### WEC-Sim data, no mooring
def test_read_wecSim_no_mooring(self):
ws_output = wave.io.wecsim.read_output(join(datadir, 'RM3_matlabWorkspace_structure.mat'))
self.assertEqual(ws_output['wave'].elevation.name,'elevation')
self.assertEqual(ws_output['bodies']['body1'].name,'float')
self.assertEqual(ws_output['ptos'].name,'PTO1')
self.assertEqual(ws_output['constraints'].name,'Constraint1')
self.assertEqual(len(ws_output['mooring']),0)
self.assertEqual(len(ws_output['moorDyn']),0)
self.assertEqual(len(ws_output['ptosim']),0)
### WEC-Sim data, with mooring
def test_read_wecSim_with_mooring(self):
ws_output = wave.io.wecsim.read_output(join(datadir, 'RM3MooringMatrix_matlabWorkspace_structure.mat'))
self.assertEqual(ws_output['wave'].elevation.name,'elevation')
self.assertEqual(ws_output['bodies']['body1'].name,'float')
self.assertEqual(ws_output['ptos'].name,'PTO1')
self.assertEqual(ws_output['constraints'].name,'Constraint1')
self.assertEqual(len(ws_output['mooring']),40001)
self.assertEqual(len(ws_output['moorDyn']),0)
self.assertEqual(len(ws_output['ptosim']),0)
### WEC-Sim data, with moorDyn
def test_read_wecSim_with_moorDyn(self):
ws_output = wave.io.wecsim.read_output(join(datadir, 'RM3MoorDyn_matlabWorkspace_structure.mat'))
self.assertEqual(ws_output['wave'].elevation.name,'elevation')
self.assertEqual(ws_output['bodies']['body1'].name,'float')
self.assertEqual(ws_output['ptos'].name,'PTO1')
self.assertEqual(ws_output['constraints'].name,'Constraint1')
self.assertEqual(len(ws_output['mooring']),40001)
self.assertEqual(len(ws_output['moorDyn']),7)
self.assertEqual(len(ws_output['ptosim']),0)
class TestWPTOhindcast(unittest.TestCase):
@classmethod
def setUpClass(self):
self.my_swh = pd.read_csv(join(datadir,'hindcast/multi_year_hindcast.csv'),index_col = 'time_index',
names = ['time_index','significant_wave_height_0'],header = 0,
dtype = {'significant_wave_height_0':'float32'})
self.my_swh.index = pd.to_datetime(self.my_swh.index)
self.ml = pd.read_csv(join(datadir,'hindcast/single_year_hindcast_multiloc.csv'),index_col = 'time_index',
names = ['time_index','mean_absolute_period_0','mean_absolute_period_1'],
header = 0, dtype = {'mean_absolute_period_0':'float32',
'mean_absolute_period_1':'float32'})
self.ml.index = pd.to_datetime(self.ml.index)
self.mp = pd.read_csv(join(datadir,'hindcast/multiparm.csv'),index_col = 'time_index',
names = ['time_index','energy_period_0','mean_zero-crossing_period_0'],
header = 0, dtype = {'energy_period_0':'float32',
'mean_zero-crossing_period_0':'float32'})
self.mp.index = pd.to_datetime(self.mp.index)
self.ml_meta = pd.read_csv(join(datadir,'hindcast/multiloc_meta.csv'),index_col = 0,
names = [None,'water_depth','latitude','longitude','distance_to_shore','timezone'
,'jurisdiction'],header = 0, dtype = {'water_depth':'float32','latitude':'float32'
,'longitude':'float32','distance_to_shore':'float32','timezone':'int16'})
self.my_meta = pd.read_csv(join(datadir,'hindcast/multi_year_meta.csv'),index_col = 0,
names = [None,'water_depth','latitude','longitude','distance_to_shore','timezone'
,'jurisdiction'],header = 0, dtype = {'water_depth':'float32','latitude':'float32'
,'longitude':'float32','distance_to_shore':'float32','timezone':'int16'})
self.mp_meta = pd.read_csv(join(datadir,'hindcast/multiparm_meta.csv'),index_col = 0,
names = [None,'water_depth','latitude','longitude','distance_to_shore','timezone'
,'jurisdiction'],header = 0, dtype = {'water_depth':'float32','latitude':'float32'
,'longitude':'float32','distance_to_shore':'float32','timezone':'int16'})
@classmethod
def tearDownClass(self):
pass
### WPTO hindcast data
# only run these tests on one Python version so as not to spam the server,
# yet keep coverage high on each test
if float(sys.version[0:3]) == 3.7:
def test_multi_year_sig_wave_height(self):
data_type = '3-hour'
years = [1990,1992]
lat_lon = (44.624076,-124.280097)
parameters = 'significant_wave_height'
wave_multiyear, meta = wave.io.hindcast.request_wpto_point_data(data_type,parameters,lat_lon,years)
assert_frame_equal(self.my_swh,wave_multiyear)
assert_frame_equal(self.my_meta,meta)
elif float(sys.version[0:3]) == 3.8:
# wait five minutes to ensure the Python 3.7 call is complete
time.sleep(300)
def test_multi_loc(self):
data_type = '3-hour'
years = [1995]
lat_lon = ((44.624076,-124.280097),(43.489171,-125.152137))
parameters = 'mean_absolute_period'
wave_multiloc, meta= wave.io.hindcast.request_wpto_point_data(data_type,
parameters,lat_lon,years)
assert_frame_equal(self.ml,wave_multiloc)
|
assert_frame_equal(self.ml_meta,meta)
|
pandas.testing.assert_frame_equal
|
import os
import numpy as np
import pandas as pd
import pygrib
from tqdm import tqdm
import logging
import datetime
#########################
###### Definitions ######
#########################
abs_base_path = os.path.dirname(os.path.abspath(__file__))
'''/home/collin/visibility-China/time_series_analysis/src/data'''
permitted_fts = ["{0:0=3d}".format(ft) for ft in range(0, 22, 3)]
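# permitted_fts == ['000', '003', '006', '009', '012', '015', '018', '021'], i.e. 3-hourly lead times up to 21 h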
param_levels = ['Visibility_0', 'Wind speed (gust)_0',
'Temperature_1000', 'Relative humidity_1000', 'U component of wind_1000', 'V component of wind_1000',
'Surface pressure_0', 'Orography_0', 'Temperature_0',
'2 metre temperature_2', '2 metre dewpoint temperature_2', '2 metre relative humidity_2',
'10 metre U wind component_10', '10 metre V wind component_10',
'Precipitation rate_0', 'Pressure reduced to MSL_0']
fieldnames = ["VIS", "WG_Surf",
"T_1000", "RH_1000", "U_1000", "V_1000",
"P_Surf", "HGT", "T_Surf",
"T_2m", "DT_2m", "RH_2m",
"U_10m", "V_10m",
"PR", "MSLP"]
name_converter_dict = {param_level:fieldnames[pl] for pl, param_level in enumerate(param_levels)}
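# e.g. name_converter_dict['Visibility_0'] == 'VIS' and name_converter_dict['Pressure reduced to MSL_0'] == 'MSLP'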
parameterNames = [param_level.split("_")[0] for param_level in param_levels]
#########################
####### Functions #######
#########################
def logger_setup():
ima = datetime.datetime.now()
logger_date = ima.strftime("%Y%m%d_%H:%M:%S")
# Gets or creates a logger
logger = logging.getLogger(__name__)
# set log level
logger.setLevel(logging.DEBUG)
# define file handler and set formatter
file_handler = logging.FileHandler(abs_base_path + "/../../data/processed_logs/{}.log".format(logger_date))
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
file_handler.setFormatter(formatter)
# add file handler to logger
logger.addHandler(file_handler)
return(logger)
### Set up logger
logger = logger_setup()
def get_filepaths(directory):
file_paths = []
for root, directories, files in os.walk(directory):
for filename in files:
filepath = os.path.join(root, filename)
file_paths.append(filepath)
return(sorted(file_paths))
def find_nearest(a, v):
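# returns the element of a closest to v, e.g. find_nearest(np.array([24.5, 24.75, 25.0]), 24.8) -> 24.75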
i = (np.abs(a - v)).argmin()
return(a[i])
def get_grb_info_GFS(file, stn_latlon_dict):
info_dict = {}
stn_list = list(stn_latlon_dict.keys())
info_dict["airport"] = stn_list
grib_file = pygrib.open(file)
try:
grbs = grib_file.select(name = parameterNames,
level = [0, 2, 10, 1000])
except ValueError:
grbs = None
if grbs == None:
'''info_dict["tag"] = "No Tag"
dummy_array = np.arange(81*141).reshape(81, 141)
info_dict["values"], info_dict["lats"], info_dict["lons"] = dummy_array, dummy_array, dummy_array'''
info_df = pd.DataFrame()
else:
single_grb = grbs[0]
'''for key in single_grb.keys():
print("{0}: {1}".format(key, single_grb[key]))'''
ft = "{0:0=3d}".format(single_grb.forecastTime)
bt = "{0:0=2d}".format(single_grb.hour)
day = "{0:0=2d}".format(single_grb.day)
month = "{0:0=2d}".format(single_grb.month)
year = "{0:0=2d}".format(single_grb.year)
tag = year + month + day + "_" + bt + "_" + ft
logger.info("Now processing tag: {}".format(tag))
### Skip case if forecast too long
if ft not in permitted_fts:
info_df = pd.DataFrame()
logger.info("Forecast too long, therefore skipping this file")
else:
info_dict["tag"] = [tag] * len(stn_list)
info_dict["year"] = [year] * len(stn_list)
info_dict["month"] = [month] * len(stn_list)
info_dict["day"] = [day] * len(stn_list)
info_dict["ft"] = [ft] * len(stn_list)
lon_list, lat_list = single_grb.latlons()[1][0], single_grb.latlons()[0].T[0]
param_level_list = []
for grb in grbs:
param_level = "{0}_{1}".format(grb.name, grb.level)
if param_level not in param_levels:
continue
data = grb.values
forecast_values, station_list = [], []
for stn, latlon in stn_latlon_dict.items():
forecast_value = data[np.where(lat_list == find_nearest(lat_list, latlon[0]))[0],
np.where(lon_list == find_nearest(lon_list, latlon[1]))[0]]
forecast_values.append(forecast_value[0])
info_dict[name_converter_dict[param_level]] = forecast_values
info_df = pd.DataFrame(info_dict)
grib_file.close()
return(info_df)
def main():
file_paths = get_filepaths("/home/ai-corner/part2/GFS/GFS_025/2021")
logger.info("Number of file paths: {}".format(len(file_paths)))
### READ STATION INFO ###
Metar_stn = abs_base_path + "/../../references/stnlist.csv"
Metar_stn_df = pd.read_csv(Metar_stn)
stn_latlon_dict = Metar_stn_df[["ICAO_CODE", "LATD", "LOND"]].set_index("ICAO_CODE").T.to_dict("list")
stn_list = list(stn_latlon_dict.keys())
logger.info("List of aiports being processed: {}".format(stn_list))
df_list = []
for file_path in tqdm(file_paths):
info_df = get_grb_info_GFS(file_path, stn_latlon_dict)
if len(info_df.index) == 0:
continue
df_list.append(info_df)
final_df =
|
pd.concat(df_list)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import os  # operating system utilities
import pandas as pd  # manage dataframes; the main support for structuring data
import numpy as np  # numeric python; vectors and arrays
import matplotlib.pyplot as plt
import scipy.stats as stats #for statistical tests
import seaborn as sns
from pandas.api.types import CategoricalDtype
from scipy.stats.stats import chisquare
from statsmodels.formula.api import ols
from stargazer.stargazer import Stargazer
os.chdir('/Users/RicardoVReig/Desktop/apuntesdata/PEP12-11/data_fifa')
fifa = pd.read_csv('fifaplayers.csv')
pd.set_option('display.max_columns', 100)
|
pd.set_option("max_rows", 100)
|
pandas.set_option
|
"""
Train a bunch of models to create metric vs perturbation task score graphs.
"""
from adversarial_models import *
from utils import *
from get_data import *
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pandas as pd
import lime
import lime.lime_tabular
import shap
from copy import deepcopy
# Flip the LIME flag to switch between LIME and SHAP
LIME = True
# Set up experiment parameters
params = Params("model_configurations/experiment_params.json")
np.random.seed(params.seed)
X, y, cols = get_and_preprocess_compas_data(params)
# add unrelated columns, setup
X['unrelated_column_one'] = np.random.choice([0,1],size=X.shape[0])
X['unrelated_column_two'] = np.random.choice([0,1],size=X.shape[0])
features = [c for c in X]
race_indc = features.index('race')
unrelated_indcs = features.index('unrelated_column_one')
unrelated_indcs1 = features.index('unrelated_column_two')
X = X.values
categorical_features = [i for i,f in enumerate(features) if f not in ['age', 'length_of_stay', 'priors_count']]
###
## The models f and psi for COMPAS. We discriminate based on race for f and consider two RANDOMLY DRAWN features to display in psi
#
# the biased model
class racist_model_f:
# Decision rule: classify negatively if race is Black
def predict(self,X):
return np.array([params.negative_outcome if x[race_indc] > 0 else params.positive_outcome for x in X])
def predict_proba(self, X):
return one_hot_encode(self.predict(X))
def score(self, X,y):
return np.sum(self.predict(X)==y) / len(X)
# the display model with one unrelated feature
class innocuous_model_psi:
# Decision rule: classify according to randomly drawn column 'unrelated column'
def predict_proba(self, X):
return one_hot_encode(np.array([params.negative_outcome if x[unrelated_indcs] > 0 else params.positive_outcome for x in X]))
# the display model with two unrelated features
class innocuous_model_psi_two:
def predict_proba(self, X):
A = np.where(X[:,unrelated_indcs] > 0, params.positive_outcome, params.negative_outcome)
B = np.where(X[:,unrelated_indcs1] > 0, params.positive_outcome, params.negative_outcome)
preds = np.logical_xor(A, B).astype(int)
return one_hot_encode(preds)
#
##
###
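# Hedged sanity-check sketch (toy row; it assumes, for illustration only, that 'race' is
# the first feature, which need not hold for the real feature ordering):
# toy = np.array([[1.0, 0.0, 0.0]])
# racist_model_f().predict(toy)  # -> negative outcome whenever the race column is > 0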
def experiment_main(X, y):
from sklearn.metrics import f1_score as f1
xtrain,xtest,ytrain,ytest = train_test_split(X,y,test_size=0.1)
ss = StandardScaler().fit(xtrain)
xtrain = ss.transform(xtrain)
xtest = ss.transform(xtest)
rates, pct_first = [], []
data_dict = {'trial':[], 'yhat':[], 'y':[], 'pct_occur_first':[], 'pct_occur_second':[], 'pct_occur_third':[]}
trial = 0
for n_estimators in [1,2,4,8,16,32,64]:
for max_depth in [1,2,4,8,None]:
for min_samples_split in [2,4,8,16,32,64]:
# Train the adversarial model for LIME with f and psi
estimator = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, min_samples_split=min_samples_split)
if LIME:
adv_lime = Adversarial_Lime_Model(racist_model_f(), innocuous_model_psi()).\
train(xtrain, ytrain, estimator=estimator, feature_names=features, perturbation_multiplier=1)
adv_explainer = lime.lime_tabular.LimeTabularExplainer(xtrain, feature_names=adv_lime.get_column_names(), discretize_continuous=False)
formatted_explanations = []
for i in range(xtest.shape[0]):
exp = adv_explainer.explain_instance(xtest[i], adv_lime.predict_proba).as_list()
formatted_explanations.append(exp)
if i >= 100: break
adv_model = adv_lime
else:
background_distribution = shap.kmeans(xtrain,10)
adv_shap = Adversarial_Kernel_SHAP_Model(racist_model_f(), innocuous_model_psi()).\
train(xtrain, ytrain, estimator=estimator, feature_names=features)
adv_kerenel_explainer = shap.KernelExplainer(adv_shap.predict, background_distribution)
explanations = adv_kerenel_explainer.shap_values(xtest[:100])
formatted_explanations = []
for exp in explanations:
formatted_explanations.append([(features[i], exp[i]) for i in range(len(exp))])
adv_model = adv_shap
summary = experiment_summary(formatted_explanations, features)
pct_occur = [0]
for indc in [1,2,3]:
found = False
for tup in summary[indc]:
if tup[0] == 'race':
pct_occur.append(sum([pct_occur[-1], tup[1]]))
found = True
if not found:
pct_occur.append(pct_occur[-1])
pct_occur = pct_occur[1:]
y = adv_model.ood_training_task_ability[0]
yhat = adv_model.ood_training_task_ability[1]
trial_df = np.array([trial for _ in range(y.shape[0])])
data_dict['trial'] = np.concatenate((data_dict['trial'], trial_df))
data_dict['yhat'] = np.concatenate((data_dict['yhat'], yhat))
data_dict['y'] = np.concatenate((data_dict['y'], y))
data_dict['pct_occur_first'] = np.concatenate((data_dict['pct_occur_first'], [pct_occur[0] for _ in range(y.shape[0])]))
data_dict['pct_occur_second'] = np.concatenate((data_dict['pct_occur_second'], [pct_occur[1] for _ in range(y.shape[0])]))
data_dict['pct_occur_third'] = np.concatenate((data_dict['pct_occur_third'], [pct_occur[2] for _ in range(y.shape[0])]))
trial += 1
if trial % 50 == 0:
print ("Complete {}".format(trial+1))
df =
|
pd.DataFrame(data_dict)
|
pandas.DataFrame
|
import random
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=pd.Index([1, 0]),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
@pytest.mark.parametrize(
"expected_idx_non_na, ascending",
[
[
[3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
[True, True],
],
[
[0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
[True, False],
],
[
[9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
[False, True],
],
[
[7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
[False, False],
],
],
)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_stable_multicolumn_sort(
self, expected_idx_non_na, ascending, na_position
):
# GH#38426 Clarify sort_values with mult. columns / labels is stable
df = DataFrame(
{
"A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
"B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
}
)
# All rows with NaN in col "B" only have unique values in "A", therefore,
# only the rows with NaNs in "A" have to be treated individually:
expected_idx = (
[11, 12, 2] + expected_idx_non_na
if na_position == "first"
else expected_idx_non_na + [2, 11, 12]
)
expected = df.take(expected_idx)
sorted_df = df.sort_values(
["A", "B"], ascending=ascending, na_position=na_position
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_categorial(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_datetimes(self):
# GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
index=date_range("20130101", periods=9),
)
dts = [
Timestamp(x)
for x in [
"2004-02-11",
"2004-01-21",
"2004-01-26",
"2005-09-20",
"2010-10-04",
"2009-05-12",
"2008-11-12",
"2010-09-28",
"2010-09-28",
]
]
df["B"] = dts[::2] + dts[1::2]
df["C"] = 2.0
df["A1"] = 3.0
df1 = df.sort_values(by="A")
df2 = df.sort_values(by=["A"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["B"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
with pytest.raises(ValueError, match="This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_values_nat_values_in_int_column(self):
# GH#14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT.value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
{"int": int_values, "float": float_values}, columns=["int", "float"]
)
df_reversed = DataFrame(
{"int": int_values[::-1], "float": float_values[::-1]},
columns=["int", "float"],
index=[1, 0],
)
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(
{"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values},
columns=["datetime", "float"],
)
df_reversed = DataFrame(
{"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]},
columns=["datetime", "float"],
index=[1, 0],
)
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories(self):
# GH#22556
# Positioning missing value properly when column is Categorical.
categories = ["A", "B", "C"]
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = "first"
na_position_last = "last"
column_name = "c"
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices)
df = DataFrame(
{
column_name: Categorical(
["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True
)
}
)
# sort ascending with na first
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + categories, categories=categories, ordered=True
)
},
index=na_indices + category_indices,
)
tm.assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
categories + list_of_nans, categories=categories, ordered=True
)
},
index=category_indices + na_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + reversed_categories,
categories=categories,
ordered=True,
)
},
index=reversed_na_indices + reversed_category_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
reversed_categories + list_of_nans,
categories=categories,
ordered=True,
)
},
index=reversed_category_indices + reversed_na_indices,
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nat(self):
# GH#16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
|
Timestamp(x)
|
pandas.Timestamp
|
import pandas as pd
from eva.hardware import cpu, io
class DataFrame(object):
@io
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False,
dimensions=None, metrics=None):
self._dimensions = None
self._metrics = None
self._native_dataframe =
|
pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
|
pandas.DataFrame
|
"""
Converts nonfiler tax units created from CPS-RETS into "SOI"-like records.
Input file: cpsnonf2014.csv, cpsrets.csv
Output file: prod2009_v2.csv
"""
import pandas as pd
import copy
def add_nonfiler(cpsrets, nonfiler):
ifdept = nonfiler["ifdept"]
js = nonfiler["js"]
xxocah = nonfiler["xxocah"]
xxocawh = nonfiler["xxocawh"]
xxoodep = nonfiler["xxoodep"]
xxopar = nonfiler["xxopar"]
was = nonfiler["was"]
intst = nonfiler["intst"]
dbe = nonfiler["dbe"]
alimony = nonfiler["alimony"]
bil = nonfiler["bil"]
pensions = nonfiler["pensions"]
rents = nonfiler["rents"]
fil = nonfiler["fil"]
ucomp = nonfiler["ucomp"]
socsec = nonfiler["socsec"]
wt = nonfiler["wt"]
nonfiler = copy.deepcopy(
nonfiler.filter(
regex=r"jcps\d{1,2}$|"
+ r"icps\d{1}$|"
+ "jcps100| cpsseq|"
+ r"nu\d{1,2}|nu18_dep|"
+ "n1820|n21|"
+ "elderly_dependent|wasp|"
+ "wass|xstate"
)
)
nonfiler["filer"] = 0
nonfiler["soiseq"] = 0
nonfiler["prodseq"] = 0
# Taxpayer Exemptions
nonfiler["xfpt"] = 0
nonfiler["xfst"] = 0
nonfiler.loc[(ifdept == 0, "xfpt")] = 1
nonfiler.loc[((ifdept == 0) & (js == 2), "xfst")] = 1
# SET THE C(*) ARRAY
nonfiler["agir1"] = 0
nonfiler["dsi"] = ifdept
nonfiler["efi"] = 0
nonfiler["eic"] = 0
nonfiler["elect"] = 0
nonfiler["fded"] = 0
nonfiler["flpdyr"] = 2009
nonfiler["flpdmo"] = 12
nonfiler["f2441"] = 0
nonfiler["f3800"] = 0
nonfiler["f6251"] = 0
nonfiler["f8582"] = 0
nonfiler["f8606"] = 0
nonfiler["f8829"] = 0
nonfiler["f8910"] = 0
nonfiler["ie"] = 0
nonfiler["mars"] = js
# hh has code = 4 for mars
nonfiler.loc[(nonfiler["mars"] == 3, "mars")] = 4
nonfiler["midr"] = 0
nonfiler["n20"] = 0
nonfiler["n24"] = 0
nonfiler["n25"] = 0
nonfiler["n30"] = 0
nonfiler["prep"] = 0
nonfiler["schb"] = 0
nonfiler["schcf"] = 0
nonfiler["sche"] = 0
nonfiler["tform"] = 0
nonfiler["txst"] = 0
nonfiler["xocah"] = xxocah
nonfiler["xocawh"] = xxocawh
nonfiler["xoodep"] = xxoodep
nonfiler["xopar"] = xxopar
nonfiler["xtot"] = (
nonfiler["xfpt"]
+ nonfiler["xfst"]
+ nonfiler["xocah"]
+ nonfiler["xocawh"]
+ nonfiler["xoodep"]
+ nonfiler["xopar"]
)
# SET THE F(*) ARRAY
nonfiler["e00200"] = was
nonfiler["e00300"] = intst
nonfiler["e00400"] = 0
nonfiler["e00600"] = dbe
nonfiler["e00650"] = 0
nonfiler["e00700"] = alimony
nonfiler["e00800"] = bil
nonfiler["e00900"] = 0
nonfiler["e01000"] = 0
nonfiler["e01100"] = 0
nonfiler["e01200"] = 0
nonfiler["e01400"] = 0
nonfiler["e01500"] = pensions
nonfiler["e01700"] = pensions
nonfiler["e02000"] = rents
nonfiler["e02100"] = fil
nonfiler["e02300"] = ucomp
nonfiler["e02400"] = socsec
nonfiler["e03150"] = 0
nonfiler["e03210"] = 0
nonfiler["e03220"] = 0
nonfiler["e03230"] = 0
nonfiler["e03260"] = 0
nonfiler["e03270"] = 0
nonfiler["e03240"] = 0
nonfiler["e03290"] = 0
nonfiler["e03300"] = 0
nonfiler["e03400"] = 0
nonfiler["e03500"] = 0
nonfiler["e00100"] = 0
nonfiler["p04470"] = 0
nonfiler["e04250"] = 0
nonfiler["e04600"] = 0
nonfiler["e04800"] = 0
nonfiler["e05100"] = 0
nonfiler["e05200"] = 0
nonfiler["e05800"] = 0
nonfiler["e06000"] = 0
nonfiler["e06200"] = 0
nonfiler["e06300"] = 0
nonfiler["e09600"] = 0
nonfiler["e07180"] = 0
nonfiler["e07200"] = 0
nonfiler["e07220"] = 0
nonfiler["e07220"] = 0
nonfiler["e07230"] = 0
nonfiler["e07140"] = 0
nonfiler["e07260"] = 0
nonfiler["e07300"] = 0
nonfiler["e07400"] = 0
nonfiler["e07600"] = 0
nonfiler["p08000"] = 0
nonfiler["e07150"] = 0
nonfiler["e06500"] = 0
nonfiler["e08800"] = 0
nonfiler["e09400"] = 0
nonfiler["e09700"] = 0
nonfiler["e09800"] = 0
nonfiler["e09900"] = 0
nonfiler["e10300"] = 0
nonfiler["e10700"] = 0
nonfiler["e10900"] = 0
nonfiler["e10950"] = 0
nonfiler["e10960"] = 0
nonfiler["e59560"] = 0
nonfiler["e59680"] = 0
nonfiler["e59700"] = 0
nonfiler["e11550"] = 0
nonfiler["e11070"] = 0
nonfiler["e11100"] = 0
nonfiler["e11200"] = 0
nonfiler["e11300"] = 0
nonfiler["e11400"] = 0
nonfiler["e11570"] = 0
nonfiler["e11580"] = 0
nonfiler["e11582"] = 0
nonfiler["e11583"] = 0
nonfiler["e10605"] = 0
nonfiler["e11900"] = 0
nonfiler["e12000"] = 0
nonfiler["e12200"] = 0
nonfiler["e15100"] = 0
nonfiler["e15210"] = 0
nonfiler["e15250"] = 0
nonfiler["e15360"] = 0
nonfiler["e17500"] = 0
nonfiler["e18400"] = 0
nonfiler["e18500"] = 0
nonfiler["e18600"] = 0
nonfiler["e19200"] = 0
nonfiler["e19550"] = 0
nonfiler["e19800"] = 0
nonfiler["e20100"] = 0
nonfiler["e19700"] = 0
nonfiler["e20550"] = 0
nonfiler["e20600"] = 0
nonfiler["e20400"] = 0
nonfiler["e20800"] = 0
nonfiler["e20500"] = 0
nonfiler["e21040"] = 0
nonfiler["p22250"] = 0
nonfiler["e22320"] = 0
nonfiler["e22370"] = 0
nonfiler["p23250"] = 0
nonfiler["e24515"] = 0
nonfiler["e24516"] = 0
nonfiler["e24518"] = 0
nonfiler["e24560"] = 0
nonfiler["e24598"] = 0
nonfiler["e24615"] = 0
nonfiler["e24570"] = 0
nonfiler["p25350"] = 0
nonfiler["p25380"] = 0
nonfiler["p25700"] = 0
nonfiler["e25820"] = 0
nonfiler["e25850"] = 0
nonfiler["e25860"] = 0
nonfiler["e25940"] = 0
nonfiler["e25980"] = 0
nonfiler["e25920"] = 0
nonfiler["e25960"] = 0
nonfiler["e26110"] = 0
nonfiler["e26170"] = 0
nonfiler["e26190"] = 0
nonfiler["e26160"] = 0
nonfiler["e26180"] = 0
nonfiler["e26270"] = 0
nonfiler["e26100"] = 0
nonfiler["e26390"] = 0
nonfiler["e26400"] = 0
nonfiler["e27200"] = 0
nonfiler["e30400"] = 0
nonfiler["e30500"] = 0
nonfiler["e32800"] = 0
nonfiler["e33000"] = 0
nonfiler["e53240"] = 0
nonfiler["e53280"] = 0
nonfiler["e53410"] = 0
nonfiler["e53300"] = 0
nonfiler["e53317"] = 0
nonfiler["e53458"] = 0
nonfiler["e58950"] = 0
nonfiler["e58990"] = 0
nonfiler["p60100"] = 0
nonfiler["p61850"] = 0
nonfiler["e60000"] = 0
nonfiler["e62100"] = 0
nonfiler["e62900"] = 0
nonfiler["e62720"] = 0
nonfiler["e62730"] = 0
nonfiler["e62740"] = 0
nonfiler["p65300"] = 0
nonfiler["p65400"] = 0
nonfiler["e68000"] = 0
nonfiler["e82200"] = 0
nonfiler["t27800"] = 0
nonfiler["s27860"] = 0
nonfiler["p27895"] = 0
nonfiler["p87482"] = 0
nonfiler["e87521"] = 0
nonfiler["e87530"] = 0
nonfiler["e87550"] = 0
nonfiler["p86421"] = 0
nonfiler["e52852"] = 0
nonfiler["e52872"] = 0
nonfiler["e87870"] = 0
nonfiler["e87875"] = 0
nonfiler["e87880"] = 0
nonfiler["recid"] = 0
nonfiler["s006"] = wt
nonfiler["s008"] = 0
nonfiler["s009"] = 0
nonfiler["wsamp"] = 0
nonfiler["txrt"] = 0
# weight
nonfiler["matched_weight"] = wt
final =
|
pd.concat([cpsrets, nonfiler], sort=False, ignore_index=True)
|
pandas.concat
|
# This is a test file intended to be used with pytest
# pytest automatically runs all the functions starting with "test_"
# see https://docs.pytest.org for more information
import os
import sys
import numpy as np
import pandas as pd
## Add stuff to the path to enable exec outside of DSS
plugin_root = os.path.dirname(os.path.dirname(os.path.dirname((os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))))
sys.path.append(os.path.join(plugin_root, 'python-lib'))
import dku_timeseries
JUST_BEFORE_SPRING_DST =
|
pd.Timestamp('20190131 01:59:00')
|
pandas.Timestamp
|
import pandas as pd
import arcpy
from arcpy.sa import *
from arcpy.ia.Functions import FindArgumentStatistics
arcpy.CheckOutExtension("Spatial")
import streamlit as st
from streamlit.elements.map import _DEFAULT_COLOR
import webbrowser
import os
class Analysis:
def input(self):
# ask user for input
# add some error handling
self.state = st.text_input("State: ")
self.county = st.text_input("County: ")
st.write('''
Get lon, lat coordinates from an address [here](https://www.latlong.net/)
''')
self.lat = st.text_input("Latitude: ")
self.lon = st.text_input("Longitude: ")
# variables for testing
# lat = 39.147316
# lon = -75.57281
# state = 'Delaware'
# county = 'Kent'
def ras(self):
st.write("Analyzing raster layers...")
# create point geometry for lon,lat input
# again, brutal method
os.chdir("..")
os.chdir("..")
os.chdir("..")
os.chdir("GIS")
out_path = f"{os.getcwd()}\AgroPV"
os.chdir("..")
os.chdir("Python")
os.chdir("duo-tool")
os.chdir("app")
out_name = "xytable"
arcpy.management.CreateTable(out_path, out_name)
arcpy.management.AddField(out_name, "lon")
arcpy.management.AddField(out_name, "lat")
arcpy.management.DeleteField(out_name, "FIELD1")
arcpy.management.DeleteField(out_name, "OBJECTID")
with arcpy.da.InsertCursor(out_name,["lon", "lat"]) as cursor:
cursor.insertRow((self.lon, self.lat))
arcpy.MakeXYEventLayer_management(out_name, "lon", "lat", "point_input")
# raster layers and outputs
ras_list = ["USA Cropland.tif",
"USA National Commodity Cr.tif",
"WorldClim Global Mean Pre.tif",
"vertebrate-richness.tif",
"pollinators-richness.tif",
"vascular-plants-richness.tif",
"USA NLCD Land Cover.tif",
"Slope in Degrees.tif",
"PV Output.tif"]
outputs = ["primary_crop",
"crop_productivity",
"precipitation_mm",
"vertebrate_richness",
"pollinators_richness",
"plants_richness",
"land_cover",
"slope_percent",
"PV_kWh_per_kWp"]
#get raster values
count = 0
for i in ras_list:
ExtractValuesToPoints("point_input", i, f"{outputs[count]}") # use "interpolate_values" field so we're not just looking at a precise point?
count+=1
# create df
cols = ["primary_crop.shp",
"crop_productivity.shp",
"precipitation_mm.shp",
"vertebrate_richness.shp",
"pollinators_richness.shp",
"plants_richness.shp",
"land_cover.shp",
"slope_percent.shp",
"PV_kWh_per_kWp.shp"
]
self.ras_col_names = ["Crop Type",
"USA National Commodity Crop Productivity Index",
"Rainfall",
"Imperiled Vertebrates - Species Richness",
"Imperiled Pollinators - Species Richness",
"Imperiled Vascular Plants - Species Richness",
"Land Use Type",
"Topography",
"Sunlight - Solar power potential"
]
vals = []
self.rasdf = pd.DataFrame(index=self.ras_col_names)
# iterate, grab values, add to list
tot_rows = 0
for item in cols:
rows = arcpy.SearchCursor(item)
for row in rows:
tot_rows+=1
val = row.getValue("RASTERVALU")
vals.append(val)
# count = 0
# for val in vals:
# try:
# vals[count] = round(val, 2)
# except:
# pass
# count+=1
# vals_d = vals[0:tot_rows:int(tot_rows/len(cols))] #this shouldn't be needed
self.rasdf["Values"] = vals
# fix values:
# replace numeric values with non-numeric where needed (crop type, land use)
# crop type:
crop_types =
|
pd.read_csv("data/crop_vals.csv")
|
pandas.read_csv
|
# anvil_mods.py
import pandas as pd
import numpy as np
import shapely
import geopandas as gpd
import quandl
from fred import Fred
# demo api key
quandl.ApiConfig.api_key = "<KEY>"
def formatIndicatorLikeQuandl(indicator, **kwargs):
"""
Uses the FRED module to access data not included
in QUANDL's dataset. Limits the returned FRED data
to only date and value fields supplied.
Accepts a FRED-formatted string for the desired
economic index (indicator).
Returns the formatted indicator as a pandas
DataFrame for downstream processing, or
an error message.
"""
try:
# set fred instance: demo API key
fr = Fred(api_key='<KEY>',response_type='df')
        # get the series observations, limited to the date and value fields
indicator = fr.series.observations(indicator).loc[:, ('date', 'value')]
# drop nans
indicator.dropna(inplace=True)
# convert str date to datetime
indicator['date'] = pd.to_datetime(indicator['date'])
# check if start and end dates are present
if kwargs:
# create date mask for only dates within period of dataset
date_mask = (indicator['date'] >= kwargs['start_date']) & (indicator['date'] <= kwargs['end_date'])
# filter
indicator = indicator[date_mask]
# set the index to the date for index processing downstream
indicator.set_index('date', drop=True, inplace=True)
        # rename the value column for consistency
indicator.rename({'value':'Value'}, axis=1, inplace=True)
except Exception as e:
return e
# return the index
return indicator
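# Hedged usage sketch for formatIndicatorLikeQuandl. The series id "HOUST" and the date
# bounds are illustrative assumptions, and a valid FRED API key must already be configured
# above for the call to succeed:
# housing = formatIndicatorLikeQuandl("HOUST", start_date="2010-01-01", end_date="2015-12-31")
# The result is a DataFrame indexed by 'date' with a single 'Value' column, i.e. the same
# shape the QUANDL indicators have downstream.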
def convertGeoJsonGeometry(data):
"""
Convert JSON features into shapely
geometry and then convert entire json data
object into geopandas dataframe.
Accepts a JSON data object.
Returns a geopandas geodataframe or an
error.
"""
try:
# convert features to shapes so it can be converted to GDF
for d in data['features']:
d['geometry'] = shapely.geometry.shape(d['geometry'])
        # convert to geopandas
geoframe = gpd.GeoDataFrame(pd.io.json.json_normalize(data['features'])) # comes as a geojson feature collection
# replace prefix in column names
geoframe.columns = geoframe.columns.str.replace('properties.', '')
except Exception as e:
return e
return geoframe
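# Usage sketch for convertGeoJsonGeometry, assuming `data` is a GeoJSON FeatureCollection
# already parsed with json.load (the file name below is hypothetical):
# import json
# with open("parcels.geojson") as fh:
#     parcels_gdf = convertGeoJsonGeometry(json.load(fh))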
def convertSpecGeoJsonGeometry(data, cols):
"""
Convert JSON features into shapely
geometry and then convert entire json data
object into geopandas dataframe.
Accepts a JSON data object as well as a
list of columns to create for the dataframe
from properties listed in the JSON object.
Returns a geopandas geodataframe or an
error.
"""
try:
# extract all data and put into single list
all_parcels = []
# for each feature in the data
for feature in data['features']:
# dict container
parcel = {}
# get the keys for the feature set
keys = feature.keys()
# loop through the keys
for key in keys:
if key == 'geometry':
# convert features to shapes so it can be converted to GDF
parcel[key] = shapely.geometry.shape(feature[key])
elif key == 'properties':
# for each desired column in the property set
for col in cols:
# get property name and append to parcel
parcel[col] = feature[key][col]
else: # skip any other keys
pass
# append entire parcel to all_parcels
all_parcels.append(parcel)
        # convert to geopandas
geoframe = gpd.GeoDataFrame(all_parcels)
except Exception as e:
return e
return geoframe
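# Usage sketch for convertSpecGeoJsonGeometry; the property names passed via `cols` are
# illustrative assumptions and must exist in each feature's 'properties' block:
# parcels_gdf = convertSpecGeoJsonGeometry(data, cols=["parcel_id", "land_use"])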
def getPeriodicIndexMovement(indicator):
"""
Get the movement of the index (a nx1 DF) for each
year desired.
Accepts a pandas DataFrame, which is an index
of economic indicators.
Note that the column values 'Year' and 'Value' are
baked into QUANDL data. Will need to check for changes
in future. A tripwire assert is added in case the change
occurs.
Returns either a numpy float val or an error message.
"""
try:
# trip wire in case col values change in QUANDL
assert 'Value' in indicator.columns, 'getIndexMovement() Value column value has changed. Edit function definition and try again..'
# set the year of the non res const for grouping
indicator['Year'] = indicator.index.year
# group the years and get the sum of the differences for each year
indicator_mvt = indicator.groupby(['Year'])['Value'].apply(lambda x: x.diff().sum())
except Exception as e:
return e
return indicator_mvt
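# Minimal sketch of what getPeriodicIndexMovement computes, on synthetic monthly data
# (the numbers are made up for illustration only):
# idx = pd.date_range("2018-01-31", periods=24, freq="M")
# demo = pd.DataFrame({"Value": np.linspace(100.0, 123.0, 24)}, index=idx)
# getPeriodicIndexMovement(demo)  # one row per year: the sum of month-over-month diffs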
def getAnnualIndexMovement(indicator):
"""
Get the movement of the index (a nx1 DF) for each year desired
Accepts a pd.DataFrame, which is an index
of economic indicators.
Note that the column values 'Year' and 'Value' are
baked into QUANDL data. Will need to check for changes
    in future. A tripwire assert is added in case the change
occurs.
Returns either a tuple of pd.DataFrames or an error message.
"""
try:
# trip wire in case col values change in QUANDL
assert 'Value' in indicator.columns, 'getIndexMovement() Value column value has changed. Edit function definition and try again..'
# group the years and get the sum of the differences for each year
indicator_mvt = indicator.diff(-1)
# convert index to only year for .get() lookup
indicator_mvt.index = indicator_mvt.index.year
except Exception as e:
return e
# return a series
return indicator_mvt.squeeze()
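# Minimal sketch for getAnnualIndexMovement, assuming one observation per year
# (synthetic, illustrative values):
# annual = pd.DataFrame({"Value": [100.0, 104.0, 103.0]},
#                       index=pd.to_datetime(["2017-12-31", "2018-12-31", "2019-12-31"]))
# getAnnualIndexMovement(annual)  # Series indexed by year; each entry is Value minus the next year's Value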
def spatialJoinFeatures(parcels, features):
"""
Spatially join each parcel with the feature dataset
by intersecting based on geometry.
Parcels is a geopandas dataframe. The columns in this
frame should only be [['buff_dist', 'parcel']].
Features is a geopandas dataframe. Contains only
geometry and feature names columns.
Returns the spaital join of the two input
geopandas dataframes. Resulting frame has
4 columns: geometry, feature name, parcel
name, and index_right.
"""
try:
        assert isinstance(parcels, gpd.GeoDataFrame), 'spatialJoinFeatures first argument must be a geodataframe. You passed an %r' % type(parcels)
        assert isinstance(features, gpd.GeoDataFrame), 'spatialJoinFeatures second argument must be a geodataframe. You passed an %r' % type(features)
# make a container
parcels_w_features = gpd.GeoDataFrame()
# chunk the data to make memory usage more efficient
for chunk in np.array_split(parcels, np.round(parcels.index.size/100)):
increment = 500
iter1 = 0
iter2 = increment
size = chunk.index.size
# convert chunk back to GeoDataFrame for sjoin operation
chunk = gpd.GeoDataFrame(chunk)
if 'buff_dist' in chunk.columns: # set the right geometry in case of buffer distance
chunk = chunk.set_geometry('buff_dist')
# iterate through each chunk
while iter1 < size:
# do remaining rows
if iter2 > size:
temp_df = gpd.tools.sjoin(chunk.iloc[iter1:], features)
# iterate through sequence iter1:iter2 to use memory more efficiently
else:
temp_df = gpd.tools.sjoin(chunk.iloc[iter1:iter2], features)
# save memory if empty
if temp_df.empty:
del(temp_df)
else: # combine parcels_w_features and temp_df
parcels_w_features =
|
pd.concat([parcels_w_features, temp_df])
|
pandas.concat
|
"""
Data structure for 1-dimensional cross-sectional and time series data
"""
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
import itertools
import operator
import sys
import warnings
from numpy import nan, ndarray
import numpy as np
from pandas.core.common import (isnull, notnull, _ensure_index,
_is_bool_indexer, _default_index)
from pandas.core.daterange import DateRange
from pandas.core.generic import PandasObject
from pandas.core.index import Index, MultiIndex
from pandas.core.indexing import _SeriesIndexer, _maybe_droplevels
import pandas.core.datetools as datetools
import pandas._tseries as _tseries
__all__ = ['Series', 'TimeSeries']
def _numpy_lt_151():
return np.__version__ < '1.5.1'
#-------------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
from pandas.core.frame import DataFrame
if isinstance(other, Series):
if self.index.equals(other.index):
return Series(op(self.values, other.values), index=self.index)
new_index = self.index + other.index
this_reindexed = self.reindex(new_index)
other_reindexed = other.reindex(new_index)
arr = op(this_reindexed.values, other_reindexed.values)
return Series(arr, index=new_index)
elif isinstance(other, DataFrame):
return NotImplemented
else:
# scalars
return Series(op(self.values, other), index=self.index)
return wrapper
def _flex_method(op, name):
def f(self, other, fill_value=None):
return self._binop(other, op, fill_value=fill_value)
f.__doc__ = """
Binary operator %s with support to substitute a fill_value for missing data
in one of the inputs
Parameters
----------
other: Series or scalar value
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both Series are
missing, the result will be missing
Returns
-------
result : Series
""" % name
f.__name__ = name
return f
#-------------------------------------------------------------------------------
# Series class
class Series(np.ndarray, PandasObject):
"""
Generic indexed (labeled) vector, including time series
Contains values in a numpy-ndarray with an optional bound index
(also an array of dates, strings, or whatever you want the 'row
names' of your series to be)
Rows can be retrieved by index value (date, string, etc.) or
relative position in the underlying array.
Operations between Series (+, -, /, *, **) align values based on
their associated index values-- they need not be the same length.
Parameters
----------
data : array-like, dict, or scalar value
Contains data stored in Series
index : array-like
Index object (or other iterable of same length as data)
Must be input if first argument is not a dict. If both a dict
and index sequence are used, the index will override the keys
found in the dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
Notes
-----
If you combine two series, all values for an index position must
be present or the value for that index position will be nan. The
new index is the sorted union of the two Series indices.
Data is *not* copied from input arrays by default
"""
_AXIS_NUMBERS = {
'index' : 0
}
_AXIS_NAMES = dict((v, k) for k, v in _AXIS_NUMBERS.iteritems())
def __new__(cls, data, index=None, dtype=None, name=None, copy=False):
if isinstance(data, Series):
if index is None:
index = data.index
elif isinstance(data, dict):
if index is None:
index = Index(sorted(data.keys()))
data = [data[idx] for idx in index]
# Create array, do *not* copy data by default, infer type
try:
subarr = np.array(data, dtype=dtype, copy=copy)
except ValueError:
if dtype:
raise
subarr = np.array(data, dtype=object)
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
if isinstance(value, basestring) and dtype is None:
dtype = np.object_
if dtype is None:
subarr = np.empty(len(index), dtype=type(value))
else:
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
else:
return subarr.item()
elif subarr.ndim > 1:
raise Exception('Data must be 1-dimensional')
if index is None:
index = _default_index(len(subarr))
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, basestring):
subarr = np.array(data, dtype=object, copy=copy)
# Change the class of the array to be the subclass type.
subarr = subarr.view(cls)
subarr.index = index
subarr.name = name
if subarr.index.is_all_dates():
subarr = subarr.view(TimeSeries)
return subarr
def __init__(self, *args, **kwargs):
pass
def __hash__(self):
raise TypeError('unhashable type')
_index = None
def _get_index(self):
return self._index
def _set_index(self, index):
indexTypes = ndarray, Index, list, tuple
if not isinstance(index, indexTypes):
raise TypeError("Expected index to be in %s; was %s."
% (indexTypes, type(index)))
if len(self) != len(index):
raise AssertionError('Lengths of index and values did not match!')
self._index = _ensure_index(index)
index = property(fget=_get_index, fset=_set_index)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self._index = getattr(obj, '_index', None)
def toDict(self):
return dict(self.iteritems())
def to_sparse(self, kind='block', fill_value=None):
"""
Convert Series to SparseSeries
Parameters
----------
kind : {'block', 'integer'}
fill_value : float, defaults to NaN (missing)
Returns
-------
sp : SparseSeries
"""
from pandas.core.sparse import SparseSeries
return SparseSeries(self, kind=kind, fill_value=fill_value)
def __contains__(self, key):
return key in self.index
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(ndarray.__reduce__(self))
subclass_state = (self.index, )
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
nd_state, own_state = state
ndarray.__setstate__(self, nd_state)
index, = own_state
self.index = index
def __getitem__(self, key):
"""
Returns item(s) for requested index/sequence, overrides default behavior
for series[key].
Logic is as follows:
- If key is in the index, return the value corresponding
to that index
- Otherwise, use key (presumably one integer or a sequence
of integers) to obtain values from the series. In the case
of a sequence, a 'slice' of the series (with corresponding dates)
will be returned, otherwise a single value.
"""
try:
if isinstance(self.index, MultiIndex):
return self._multilevel_index(key)
else:
values = self.values
try:
return values[self.index.get_loc(key)]
except KeyError:
if isinstance(key, (int, np.integer)):
return values[key]
raise
except TypeError:
pass
def _index_with(indexer):
return Series(self.values[indexer],
index=self.index[indexer])
# special handling of boolean data with NAs stored in object
# arrays. Sort of an elaborate hack since we can't represent boolean
# NA. Hmm
if _is_bool_indexer(key):
self._check_bool_indexer(key)
key = np.asarray(key, dtype=bool)
return _index_with(key)
# TODO: [slice(0, 5, None)] will break if you convert to ndarray,
# e.g. as requested by np.median
try:
return _index_with(key)
except Exception:
key = np.asarray(key)
return _index_with(key)
def _multilevel_index(self, key):
values = self.values
try:
loc = self.index.get_loc(key)
if isinstance(loc, slice):
# TODO: what if a level contains tuples??
new_index = self.index[loc]
new_index = _maybe_droplevels(new_index, key)
return Series(values[loc], index=new_index)
else:
return values[loc]
except KeyError:
if isinstance(key, (int, np.integer)):
return values[key]
raise Exception('Requested index not in this series!')
def get(self, key, default=None):
"""
Returns value occupying requested index, default to specified
missing value if not present
Parameters
----------
key : object
Index value looking for
default : object, optional
Value to return if key not in index
Returns
-------
y : scalar
"""
if key in self.index:
return self._get_val_at(self.index.get_loc(key))
else:
return default
# help out SparseSeries
_get_val_at = ndarray.__getitem__
def __getslice__(self, i, j):
"""
Returns a slice of the Series.
Note that the underlying values are COPIES.
The reason that the getslice returns copies is that otherwise you
will have a reference to the original series which could be
inadvertently changed
"""
return Series(self.values[i:j].copy(), index=self.index[i:j])
def __setitem__(self, key, value):
values = self.values
try:
loc = self.index.get_loc(key)
values[loc] = value
return
except KeyError:
if isinstance(key, (int, np.integer)):
values[key] = value
return
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item
pass
self._check_bool_indexer(key)
# special handling of boolean data with NAs stored in object
# arrays. Sort of an elaborate hack since we can't represent boolean
# NA. Hmm
if isinstance(key, np.ndarray) and key.dtype == np.object_:
mask = isnull(key)
if mask.any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
if set([True, False]).issubset(set(key)):
key = np.asarray(key, dtype=bool)
values[key] = value
return
values[key] = value
def _check_bool_indexer(self, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
if isinstance(key, Series) and key.dtype == np.bool_:
if not key.index.equals(self.index):
raise Exception('can only boolean index with like-indexed '
'Series or raw ndarrays')
def __setslice__(self, i, j, value):
"""Set slice equal to given value(s)"""
ndarray.__setslice__(self, i, j, value)
def __repr__(self):
"""Clean string representation of a Series"""
if len(self.index) > 500:
return self._make_repr(50)
elif len(self.index) > 0:
return _seriesRepr(self.index, self.values)
else:
return '%s' % ndarray.__repr__(self)
def _make_repr(self, max_vals=50):
vals = self.values
index = self.index
num = max_vals // 2
head = _seriesRepr(index[:num], vals[:num])
tail = _seriesRepr(index[-(max_vals - num):], vals[-(max_vals - num):])
return head + '\n...\n' + tail + '\nlength: %d' % len(vals)
def toString(self, buffer=sys.stdout, nanRep='NaN'):
print >> buffer, _seriesRepr(self.index, self.values,
nanRep=nanRep)
def __str__(self):
return repr(self)
def __iter__(self):
return iter(self.values)
def copy(self):
return Series(self.values.copy(), index=self.index)
#-------------------------------------------------------------------------------
# Arithmetic operators
__add__ = _arith_method(operator.add, '__add__')
__sub__ = _arith_method(operator.sub, '__sub__')
__mul__ = _arith_method(operator.mul, '__mul__')
__div__ = _arith_method(operator.div, '__div__')
__truediv__ = _arith_method(operator.truediv, '__truediv__')
__pow__ = _arith_method(operator.pow, '__pow__')
__truediv__ = _arith_method(operator.truediv, '__truediv__')
__radd__ = _arith_method(operator.add, '__add__')
__rmul__ = _arith_method(operator.mul, '__mul__')
__rsub__ = _arith_method(lambda x, y: y - x, '__sub__')
__rdiv__ = _arith_method(lambda x, y: y / x, '__div__')
__rtruediv__ = _arith_method(lambda x, y: y / x, '__truediv__')
__rpow__ = _arith_method(lambda x, y: y ** x, '__pow__')
# Inplace operators
__iadd__ = __add__
__isub__ = __sub__
__imul__ = __mul__
__idiv__ = __div__
__ipow__ = __pow__
#-------------------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
def count(self):
"""
Return number of observations of Series.
Returns
-------
nobs : int
"""
return
|
notnull(self.values)
|
pandas.core.common.notnull
|
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series
import pandas._testing as tm
class TestDataFrameAlign:
def test_align_float(self, float_frame):
af, bf = float_frame.align(float_frame)
assert af._mgr is not float_frame._mgr
af, bf = float_frame.align(float_frame, copy=False)
assert af._mgr is float_frame._mgr
# axis = 0
other = float_frame.iloc[:-5, :3]
af, bf = float_frame.align(other, axis=0, fill_value=-1)
tm.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = float_frame.index.join(other.index)
diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
assert (diff_a_vals == -1).all()
af, bf = float_frame.align(other, join="right", axis=0)
tm.assert_index_equal(bf.columns, other.columns)
tm.assert_index_equal(bf.index, other.index)
tm.assert_index_equal(af.index, other.index)
# axis = 1
other = float_frame.iloc[:-5, :3].copy()
af, bf = float_frame.align(other, axis=1)
tm.assert_index_equal(bf.columns, float_frame.columns)
tm.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = float_frame.index.join(other.index)
diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
assert (diff_a_vals == -1).all()
af, bf = float_frame.align(other, join="inner", axis=1)
tm.assert_index_equal(bf.columns, other.columns)
af, bf = float_frame.align(other, join="inner", axis=1, method="pad")
tm.assert_index_equal(bf.columns, other.columns)
af, bf = float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=None
)
tm.assert_index_equal(bf.index, Index([]))
af, bf = float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
# Try to align DataFrame to Series along bad axis
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
float_frame.align(af.iloc[0, :3], join="inner", axis=2)
# align dataframe to series with broadcast or not
idx = float_frame.index
s = Series(range(len(idx)), index=idx)
left, right = float_frame.align(s, axis=0)
tm.assert_index_equal(left.index, float_frame.index)
tm.assert_index_equal(right.index, float_frame.index)
assert isinstance(right, Series)
left, right = float_frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, float_frame.index)
expected = {c: s for c in float_frame.columns}
expected = DataFrame(
expected, index=float_frame.index, columns=float_frame.columns
)
tm.assert_frame_equal(right, expected)
# see gh-9558
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
result = df[df["a"] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
result = df.where(df["a"] == 2, 0)
expected = DataFrame({"a": [0, 2, 0], "b": [0, 5, 0]})
tm.assert_frame_equal(result, expected)
def test_align_int(self, int_frame):
# test other non-float types
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = int_frame.align(other, join="inner", axis=1, method="pad")
tm.assert_index_equal(bf.columns, other.columns)
def test_align_mixed_type(self, float_string_frame):
af, bf = float_string_frame.align(
float_string_frame, join="inner", axis=1, method="pad"
)
tm.assert_index_equal(bf.columns, float_string_frame.columns)
def test_align_mixed_float(self, mixed_float_frame):
# mixed floats/ints
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = mixed_float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
def test_align_mixed_int(self, mixed_int_frame):
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = mixed_int_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
def test_align_multiindex(self):
# GH#10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product(
[range(2), range(3), range(2)], names=("a", "b", "c")
)
idx = pd.Index(range(2), name="b")
df1 = pd.DataFrame(np.arange(12, dtype="int64"), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype="int64"), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join="left")
res2l, res2r = df2.align(df1, join="right")
expl = df1
tm.assert_frame_equal(expl, res1l)
tm.assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_frame_equal(expr, res1r)
tm.assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join="right")
res2l, res2r = df2.align(df1, join="left")
exp_idx = pd.MultiIndex.from_product(
[range(2), range(2), range(2)], names=("a", "b", "c")
)
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_frame_equal(expl, res1l)
tm.assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_frame_equal(expr, res1r)
tm.assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE"))
s = pd.Series([1, 2, 4], index=list("ABD"), name="x")
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame(
{"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
index=list("ABCDE"),
)
exp2 = pd.Series([1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x")
tm.assert_frame_equal(res1, exp1)
|
tm.assert_series_equal(res2, exp2)
|
pandas._testing.assert_series_equal
|
from __future__ import division, unicode_literals, print_function # for compatibility with Python 2 and 3
import numpy as np
import pandas as pd
import pims
import trackpy as tp
import ipywidgets as widgets
import matplotlib as mpl
import matplotlib.pyplot as plt
from taxispy.detect_peaks import detect_peaks
import math
import random
import time
import subprocess
import platform
from io import BytesIO
import os
from matplotlib.backends.backend_agg import FigureCanvasAgg
from IPython.display import display
from deap import base, creator, tools, algorithms
__author__ = "<NAME>, https://github.com/m1vg"
__version__ = "0.1.6.3"
__license__ = "MIT"
mpl.rc('image', cmap='gray')
Box = widgets.Box
VBox = widgets.VBox
Accordion = widgets.Accordion
Text = widgets.Text
IntText = widgets.IntText
FloatText = widgets.FloatText
Button = widgets.Button
Toggle = widgets.ToggleButton
HTML = widgets.HTML
IntSlider = widgets.IntSlider
Range = widgets.IntRangeSlider
FloatRange = widgets.FloatRangeSlider
Image = widgets.Image
HBox = widgets.HBox
Dropdown = widgets.Dropdown
Label = widgets.Label
Checkbox = widgets.Checkbox
# initialize the user interface.
class UserInterface(object):
def __init__(self):
self.path = []
self.frames_second = []
self.pixels_micron = []
self.fig_size = [5, 5]
self.frames =[]
self.f = []
self.t_i = []
self.slider = None
self.play = None
self.frame_range = None
self.vel = None
self.angular = None
self.ensemble_particle_id_list = None
self.counter = 0
self.interactive_ranges = False
self.vel_ensemble = None
self.angular_ensemble = None
self.trajectories_dic = None
self.cut_button = None
self.max_displacement = None
self.ax1_ind = None
self.ax2_ind = None
self.ax3_ind = None
self.ax4_ind = None
self.fig_individual = None
self.acc_vel_ensemble = None
self.av_vel_ensemble = None
self.av_vel = None
self.av_angular = None
self.av_acc_vel = None
self.peaks_table = None
# options controlling behaviour of genetic algorithm
self.generations = None
self.population = None
self.parallel = None
self.optimal_objective_function = None
self.weights_obj1 = None
self.weights_obj2 = None
self.individuals_bounds = None
# options controlling the behavior of adaptation curve.
self.single_trajectories = False
self.adaptation_curve_data = None
self.adaptation_curve_smooth_data = None
self.show_id = False
self.displacement_3_points = None
self.max_displacement_3_points = None
self.acc_vel = None
self.acc_angular = None
self.optimal_parameter_set = [4, 3, 10]
# First, create four boxes, each for one of the four sections.
self.box0 = VBox()
self.box1 = VBox()
self.box2 = VBox()
self.box3 = VBox()
self.box3_1 = VBox()
self.box4 = VBox()
self.box5 = VBox()
self.box6 = VBox()
self.peak_frame = None
self.peak_values = None
self.peak_height = None
self.excel_unfiltered = None
self.excel_filtered = None
self.lock1 = True
# Now, create accordion
self.interface = Accordion(children=[self.box1, self.box2, self.box3, self.box3_1, self.box4, self.box5])
title_list = ['File', 'Feature Identification', 'Trajectories',
'Visualization', 'Parameter Determination', 'Tumbling Frequencies']
for idx, val in enumerate(title_list):
self.interface.set_title(idx, val)
self.populate_first_window()
self.populate_second_window()
self.populate_third_window()
self.populate_fourth_window()
self.populate_fifth_window()
def populate_first_window(self):
# ###################### now lets start constructing the first box.
legend = HTML(value='<style>div.a {line-height: normal;}</style>''<div class="a">'
'Introduce the location of the folder containing frames to be analyzed in the '
'<b>Path</b> field. Then, click the <b>Load Frames</b> button. If necessary, use the '
'<b>Frames</b> slider to adjust the range of frames to be analyzed, '
'then click on <b>Cut Frames</b>.'
' Adjust numerical values for <b>Frames/s</b> and <b>Pixels/Micron</b> if necessary.'
'Then, click on <b>Process Data</b>. '
'<br><br></div>')
path = Text(description='Path',
placeholder='/Users/Figures ..',
value='/Documents/'
)
frames_second = FloatText(description='Frames/s',
value=20.3)
pixels_micron = FloatText(description='Pixels/Micron',
value=6.0)
load_data = Button(description='Process Data')
load_data.path = path
load_data.frames_second = frames_second
load_data.pixels_micron = pixels_micron
self.pixels_micron = pixels_micron
self.frames_second = frames_second
load_data.on_click(self.load_data_function)
frame_segment = Range(description='Frames', min=0, max=1000, step=1, value=[0, 100])
load_button = Button(description='Load Frames')
load_button.path = path
self.cut_button = Button(description='Cut Frames', disabled=True)
self.cut_button.on_click(self.cut_frames)
load_button.on_click(self.update_frame_segment)
self.box1.children = [legend, path, load_button, frame_segment, self.cut_button,
frames_second, pixels_micron, load_data]
def populate_second_window(self):
# ####################### now let's construct second box
legend0 = HTML(value='<style>div.a {line-height: normal;}</style>'
'<div class="a">Please select adequate values for the following parameters:<br><br></div>')
legend1 = HTML(value='<style>div.a {line-height: normal;}</style>'
'<div class="a"> <br><b>Diameter</b> is given in pixels and its value should be odd. It '
'refers to the diameter of the particles to be identified by the software. '
' <b>Min. Mass</b> '
'refers to the minimal mass (brightness) particles should have in order to be considered. '
'<b>Diameter</b> and <b>Min. Mass</b> are related. <b>Invert</b> '
'refers to the color pattern. Use if cells are represented by black objects in original '
'raw frames. '
'Adequate values for minimal mass can be extracted from the histograms below. '
'The first, an intermediate, and the last frames are characterized '
'by each of the three columns shown below. Histograms are for the mass of the particles,'
' blue circles are cells identified by '
'the software. If cells (bright objects) are not identified properly, adjust '
'parameter values. To continue, click on <b>Calculate Trajectories</b>: <br><br>'
'</div>')
diameter = IntSlider(description='Diameter', value=25, min=1, max=99, step=2, continuous_update=False)
diameter.observe(handler=self.update_hist_frames, names='value')
self.diameter = diameter
invert = Toggle(value=True,
description='Invert?'
)
invert.observe(handler=self.update_hist_frames, names='value')
self.invert = invert
min_mass = IntSlider(description='Min. Mass', value=2000, min=0, max=5000, continuous_update=False)
self.min_mass = min_mass
self.min_mass.observe(handler=self.update_hist_frames, names='value')
self.mass_histogram_box = Box()
self.frames_container = Box()
controllers = HBox()
controllers.children = [diameter, min_mass, invert]
button_calculate_trajectories = Button(description='Calculate Trajectories')
button_calculate_trajectories.on_click(self.refresh_trajectories_ensemble)
self.box2.children = [legend0, controllers, legend1, button_calculate_trajectories,
VBox([self.frames_container, self.mass_histogram_box])]
def populate_third_window(self):
# ####################### now let's construct third box
legend4 = HTML(value='<style>div.a {line-height: normal;}</style>'
'<div class="a">The trajectories shown below are calculated using following parameters:'
' </div>'
)
legend4_1 = HTML(value ='<style>div.a {line-height: normal;}</style>'
'<div class="a">'
'<b>Max. Disp.</b> refers to the maximum displacement (in pixels) '
'allowed for a cell to move between frames. <b> Min. # Frms</b> refers to '
'the minimum number of frames that a trajectory should have to be considered. '
' Please change values as required and click on <b>Recalculate</b> '
'to refresh results. '
'The number of trajectories shown in the plot on the right panel can be '
'reduced by increasing the displacement threshold (<b>Disp. Thrshld.</b>). '
'This threshold can be set to values '
'between 0 and 100% of the maximum displacement of all particles. '
'Trajectories shown exhibit a displacement that equals or surpasses the threshold set. '
'Alternatively, trajectories can be filtered by adjusting the frame range '
'(<b>Frame Rng</b>). </div>'
)
legend5 = HTML(value='')
max_displacement = IntSlider(value=self.diameter.value, description='Max. Disp.', min=0,
max=self.diameter.value*5, continuous_update=False, step=1)
self.max_displacement = max_displacement
memory = IntSlider(value=0, description='Memory', min=0, max=0, continuous_update=False, step=1)
self.memory = memory
number_frames = IntSlider(value=20, description='Min. # Frms', min=0, max=40, step=1)
self.number_frames = number_frames
self.box_trajectories_ensemble1 = Box()
self.box_trajectories_ensemble2 = Box()
self.box_trajectories_ensemble = HBox(children=[self.box_trajectories_ensemble1, self.box_trajectories_ensemble2])
controllers2 = VBox()
controllers2.children = [max_displacement, self.number_frames]
controllers3 = HBox()
controller_displ = IntSlider(value=0, description='Disp. Thrshld', min=0,
max=100, continuous_update=False, step=1)
controller_time_frame = widgets.IntRangeSlider(value=[0, 10], min=0, max=10.0, step=1,
description='Frame Rng:', disabled=False,
continuous_update=False, orientation='horizontal', readout=True)
controller_displ.observe(handler=self.filter_initial_trajectories, type='change', names='value')
controllers3.children = [controller_displ, controller_time_frame]
recalculate = Button(description='Recalculate')
recalculate.on_click(self.recalculate_link)
button_box = HBox(children=[recalculate])
self.legend6 = HTML()
self.box3.controller_time_frame = controller_time_frame
self.box3.controller_displ = controller_displ
self.box3.children = [legend4, controllers2, legend4_1, controllers3, self.box_trajectories_ensemble,
self.legend6, button_box, legend5]
def populate_fourth_window(self):
# ####################### now let's construct 3.1 Box. Visualization of a certain particle.
self.legend3_1 = HTML(value = '<style>div.a {line-height: normal;}</style>'
'<div class="a">'
'Cell trajectories identified by the software can be visualized in this window.'
' Select a trajectory from the drop-down menu and press the play button.'
'<br /><br /></div>'
)
self.trajectories_menu = Dropdown(description='Trajectory')
self.trajectories_menu.observe(handler=self.update_video_parameters, type='change', names='value')
self.video_wid = widgets.Image()
# ####################### now let's construct fourth box
ensemble = HTML('<b>Automatic Parameter Identification Using a Genetic Algorithm</b>')
description = HTML('<style>div.a {line-height: normal;}</style><div class="a">'
'In this section, key parameters for event identification, i.e., '
'# Frames, # Smooth, and Acc. Thrhld, can be automatically identified using '
'an optimization routine. Key parameters are identified by minimizing the difference'
' between the estimated and the real number of change of direction for a given set of '
' trajectories. To populate the training set, first provide the number of trajectories '
'by adjusting the <b> # Trajectories </b> slider, then click on <b>Populate</b>. A randomly '
'selected training set will appear. Update this list by providing the trajectory ID and its '
'observed number of change of direction. Alternatively, provide the name of an Excel '
'file containing two columns, one for the trajectory ID and one for its respective change '
'of direction. The headers of these columns should be "Trajectory" and "Tumbles", '
'respectively. Once the training set has been loaded from an excel file or manually typed, '
'click on <b>Estimate Parameters</b>. Please note that this step is computationally intensive'
' and might take several minutes to complete. After the optimization routine is done, '
'the button <b>Show Parameters</b> will appear and you can continue to the '
'<b>Tumbling Frequencies</b> tab. '
'<br /><br /></div>')
individual = HTML('<b>Analysis of Individual Trajectories</b>')
description_individual = HTML('Select one trajectory from the list to generate velocity plots.'
' Time can be adjusted by changing <b>Time Range</b>.')
self.individual_metrix_box = Box()
self.individual_controllers_box = VBox()
training_controller_box = VBox([HBox([IntSlider(min=0, max=10, value=10, description='# Trajectories:'),
Button(description='Populate')]),
HBox([Text(description='File:', placeholder='Enter Excel File (.xlsx)'),
Button(description='Load')]),
])
training_controller_box.children[0].children[1].on_click(self.populate_training_set)
training_controller_box.children[1].children[1].on_click(self.load_training_set)
training_set = VBox()
estimate_button = Button(description='Estimate Parameters', disabled=True)
estimate_button.on_click(self.prepare_genetic_algorithm)
optimal_parameters_box = VBox()
genetic_algorithm_controller = VBox()
self.box4.children = [individual, # 0
description_individual, # 1
self.individual_controllers_box, # 2
self.individual_metrix_box, # 3
ensemble, # 4
description, # 5
training_controller_box, # 6
training_set, # 7
genetic_algorithm_controller, # 8
estimate_button, # 9
optimal_parameters_box # 10
]
def populate_fifth_window(self):
# ####################### now let's construct fifth box
legend6 = HTML('Set parameters for data smoothing and event identification:')
legend7 = HTML('<style>div.a {line-height: normal;}</style>''<div class="a">'
'<br />Now, set thresholds to filter trajectories with anomalous behavior. '
' Use the displacement threshold to eliminate stuck cells exhibiting '
'high velocity. A threshold for the maximum number of change of directions (Max Chng Dir) '
'can be used to eliminate trajectories with excessive number of turns.<br /><br /></div>')
legend7_1 = HTML('<style>div.a {line-height: normal;}</style>''<div class="a">'
'<br />In order to calculate adaptation curves, set a value for time intervals in seconds '
'- T. int. (s) -. To calculate the adaptation time, set a threshold value for the frequency '
'of change of direction (Chg. Dir.)<br /><br /></div>')
lin_vel_threshold = widgets.BoundedIntText(value=4, min=0, max=100, step=1,
description='Velocity', disabled=False, continuous_update=True)
acc_threshold = widgets.BoundedIntText(value=10, min=0, max=1000, step=1,
description='Acceleration', disabled=False, continuous_update=False)
disp_threshold = IntSlider(value=10, description='Dsplcmt, %', min=0,
max=100, continuous_update=False, step=1)
turns_threshold = widgets.BoundedIntText(value=10, min=0, max=100, step=1, description='Max Chng Dir',
disabled=False,continuous_update=True)
frames_average = widgets.BoundedIntText(value=4, min=0, max=10, step=1,
description='# Frames', disabled=False, continuous_update=False)
smooth_cycles = widgets.BoundedIntText(value=3, min=0, max=10, step=1, description='# Smooth', disabled=False,
continuous_update=False)
time_interval = widgets.BoundedIntText(value=1, min=1, max=10, step=1,
description='T. Int. (s)', disabled=False, continuous_update=True)
change_dir_threshold = widgets.BoundedFloatText(value=0.45, min=0, max=2, step=0.05,
description='Chg. Dir. (1/s)', disabled=False,
continuous_update=True)
frame_ranges = widgets.IntRangeSlider(value=[0, 10], min=0, max=10.0, step=1,
description='Frame Rng:', disabled=False, continuous_update=False,
orientation='horizontal', readout=True)
b_calculate = Button(description='Calculate')
b_calculate.on_click(self.calculate_ensemble)
results = VBox()
results_string = HTML()
options_adaptation_curve = VBox()
data_adaptation_curve = VBox()
b_report = Button(description='Report', disabled=True)
b_report.on_click(self.generate_report)
self.box5.acceleration = acc_threshold
self.box5.lin_vel_threshold = lin_vel_threshold
self.box5.children = [legend6, # 0
frames_average, # 1
smooth_cycles, # 2
acc_threshold, # 3
legend7, # 4
lin_vel_threshold, # 5
HBox([disp_threshold, turns_threshold, frame_ranges]), # 6
legend7_1, # 7
time_interval, # 8
change_dir_threshold, # 9
b_calculate, # 10
results, # 11
results_string, # 12
options_adaptation_curve, # 13
data_adaptation_curve, # 14
b_report] # 15
self.box5.frame_ranges = frame_ranges
# ####################### now let's construct sixth box
legend8 = HTML('Adaptation times can be calculated in this window. Required parameters are the same as '
'for the Ensemble Analysis window. Note that in order for a trajectory to be considered, '
'it must be in focus for a certain number of frames. This parameter is defined in the window '
'<b>Trajectories</b> by the value of # Frames. The same is true for the parameter Max. Disp. '
'and all parameters from the window <b>Feature Identification</b>.')
legend9 = HTML('First set parameters for data smoothing:')
legend10 = HTML('Now, set parameters for event identification. Then click <b>Calculate</b>')
b_calculate2 = Button(description='Calculate')
b_calculate2.on_click(self.calculate_adaptation_time)
results2 = VBox()
results_string2 = HTML()
time_interval = widgets.BoundedFloatText(value=5, min=0, max=500, step=1,description='T. Int. (s)',
disabled=False, continuous_update=False)
lin_vel_threshold2 = widgets.BoundedIntText(value=12, min=0, max=100, step=1,
description='Velocity', disabled=False, continuous_update=True)
self.box6.children = [legend8, # 0
legend9, # 1
frames_average, # 2
smooth_cycles, # 3
legend10, # 4
lin_vel_threshold2, # 5
acc_threshold, # 6
time_interval, # 7
b_calculate2, # 8
results2, # 9
results_string2, # 10
]
def load_data_function(self, b):
self.box3.controller_time_frame.observe(handler=self.filter_initial_trajectories, type='change', names='value')
# update max value of time interval for adaptation curve calculation
self.box5.children[8].max = len(self.frames)/self.frames_second.value # updated. it was [7]
# get number of frames and micron/pixel
self.pixels_micron = b.pixels_micron
self.frames_second = b.frames_second
# this function needs to do following things:
# load frames
if len(self.frames) == 0:
self.frames = pims.ImageSequence(b.path.value+'/*.jpg', as_grey=True)
# call function that plots three frames
self.populate_frames()
# generate histogram of mass distribution and place it in self.mass_histogram_box
self.refresh_histogram()
# open next window
self.interface.selected_index = 1
# Generate image for frame 0
img = plt.imshow(self.frames[0])
plt.close()
buf = BytesIO()
canvas = FigureCanvasAgg(img.figure)
canvas.print_png(buf)
data = buf.getvalue()
self.video_wid.value = data
def refresh_histogram(self):
# identify frames
frames = [0, round(len(self.frames)/2), len(self.frames)-1]
children = [Image(value=self.get_hist_data(self.frames[element])) for element in frames]
self.mass_histogram_box.children = children
# new mass value is b['new']
# create histogram and place in box self.mass_histogram_box
def refresh_trajectories_ensemble(self, b):
# observe controller
# Generate trajectories plot and set as children of self.box_trajectories_ensemble
self.f = tp.batch(self.frames[:],
self.diameter.value,
minmass=self.min_mass.value,
invert=self.invert.value,
engine='numba',
processes='auto')
self.generate_link(self.f)
display(self.interface)
self.number_frames.max = len(self.frames)-1
self.interface.selected_index = 2
# Modify widget 'Characterization'
self.update_characterization_widget()
def recalculate_link(self,b):
self.generate_link(self.f)
display(self.interface)
def generate_link(self, f):
self.t = tp.link_df(f, self.max_displacement.value, memory=self.memory.value) # maximum displacement in pixels.
self.t1 = tp.filter_stubs(self.t, self.number_frames.value)
self.legend6.value = '<style>div.a {line-height: normal;}</style>''<div class="a"> Showing ' + \
str(self.t1['particle'].nunique()) + ' trajectories out of ' + \
str(self.t['particle'].nunique()) + ' total trajectories.' + ' </div>'
fig_size = [7, 7]
plt.figure(figsize=fig_size)
ax = plt.gca()
yfig = tp.plot_traj(self.t1, ax=ax)
buf = BytesIO()
canvas = FigureCanvasAgg(yfig.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
plt.close(ax.figure)
self.box_trajectories_ensemble1.children = [Image(value=data_fig)]
plt.figure(figsize=fig_size)
ax = plt.gca()
yfig = tp.plot_traj(self.t1, ax=ax)
# generate a new data frame containing X positions for each particle
x = self.t1.set_index(['frame', 'particle'])['x'].unstack()
y = self.t1.set_index(['frame', 'particle'])['y'].unstack()
id_particles = x.columns.values
self.trajectories_menu.options = id_particles
self.current_ids = id_particles
self.trajectories_menu.value = id_particles[-1]
#update .options trait of dropdown Trajectory # of the individual trajectories in characterization widget
self.update_characterization_widget()
counter = 0
for particle in id_particles:
if counter < 200:
#get x and y position
x_text = x[np.isfinite(x[particle])][particle].iloc[0]
y_text = y[np.isfinite(y[particle])][particle].iloc[0]
#plot ID
plt.text(x_text, y_text, str(particle), fontsize=10)
counter += 1
else:
break
buf = BytesIO()
canvas = FigureCanvasAgg(yfig.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
plt.close(ax.figure)
self.box_trajectories_ensemble2.children = [Image(value=data_fig)]
def populate_frames(self):
# identify frames
frames = [0, round(len(self.frames)/2), len(self.frames)-1]
children = [Image(value=self.get_fig_data(self.frames[element])) for element in frames]
self.frames_container.children = children
def get_fig_data(self, data):
# this scripts generate figure from frame data and return string that can be printed using the Figure widget.
# use preset parameters to circle cells.
f = tp.locate(data, self.diameter.value, minmass=self.min_mass.value, invert=self.invert.value) # frame number, diameter of particle
plt.figure(figsize=[5, 4])
ax = plt.gca()
ax.set(xlabel='y, [px]', ylabel='x, [px] ')
y = tp.annotate(f, data, invert=self.invert.value, color='blue', ax=ax) # TODO: suppress the extra output produced by tp.annotate
buf = BytesIO()
canvas = FigureCanvasAgg(y.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
plt.close(ax.figure)
return data_fig
def get_hist_data(self, data):
plt.figure(figsize=[5, 4])
ax = plt.gca()
f = tp.locate(data, self.diameter.value, minmass=self.min_mass.value, invert=self.invert.value) # frame number, size of particle
ax.hist(f['mass'], bins=20)
# Optionally, label the axes.
ax.set(xlabel='mass', ylabel='count')
buf = BytesIO()
canvas = FigureCanvasAgg(ax.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
plt.close(ax.figure)
return data_fig
def update_hist_frames(self, b):
self.refresh_histogram()
self.populate_frames()
def update_video_parameters(self, b):
self.slider = None
self.play = None
# this function gets called when a certain particle is selected. i.e, when the drop-down menu
# self.trajectories_menu changes its trait value.
# Generate matrix specific for one particle
self.t_i = self.t1[self.t1['particle'] == b['new']]
# update self.video_wid.value with the first image.
if len(self.t_i) != 0:
first_frame = self.t_i['frame'].iloc[0]
plt.figure(figsize=[6, 6])
ax = plt.gca()
ax.set(xlabel='x, [px]', ylabel='y, [px]')
y = tp.annotate(self.t_i[self.t_i['frame'] == first_frame], self.frames[first_frame],
color='blue', invert=False, ax=ax);
buf = BytesIO()
canvas = FigureCanvasAgg(y.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
plt.close(ax.figure)
self.video_wid.value = data_fig
#update values of self.play & self.slider.
self.play = widgets.Play(
value=0,
min=0,
max=len(self.t_i['frame']),
step=1,
description="Press play",
disabled=False)
self.slider = widgets.IntSlider(continuous_update=True,
value=0, min=0, max=len(self.t_i['frame']),
description='Frame #')
widgets.jslink((self.play, 'value'), (self.slider, 'value'))
self.slider.observe(handler=self.update_video, type='change', names='value')
self.trajectories_menu.observe(handler=self.update_video_parameters, type='change', names='value')
single_trajectory = self.get_single_trajectory(self.t_i)
self.box3_1.children = [self.legend3_1, widgets.HBox([self.trajectories_menu, self.play, self.slider]),
HBox([Box([self.video_wid]), Box([single_trajectory])])]
def update_video(self, b):
counter = b['new'] # contains iloc of self.t_i
if counter < len(self.t_i):
frame_id = self.t_i['frame'].iloc[counter]
plt.figure(figsize=[6, 6])
ax = plt.gca()
ax.set(xlabel='x, [px]', ylabel='y, [px]')
y = tp.annotate(self.t_i[self.t_i['frame'] == frame_id], self.frames[frame_id],
color='blue', invert=False, ax=ax);
plt.text(100, 100, str(round(frame_id/self.frames_second.value, 3)) + ' s', color='white')
buf = BytesIO()
canvas = FigureCanvasAgg(y.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
plt.close(ax.figure)
self.video_wid.value = data_fig
def update_characterization_widget(self):
# current ids are in self.current_ids
# update ensemble Box. Target box: self.ensemble_controllers_box.children
# update individual box. Target: self.individual_controllers_box.children
self.trajectories_id = Dropdown(description='Trajectory #', options=self.current_ids)
self.trajectories_id.observe(handler=self.update_frame_range, type='change', names='value')
self.trajectories_id.value = self.current_ids[-1]
self.box4.children[6].children[0].children[0].max = len(self.current_ids)
def update_frame_range(self, b):
# b['new'] contains the ID of the particle.
t_i = self.t1[self.t1['particle'] == b['new']]
min_value = t_i['frame'].iloc[0]/self.frames_second.value if t_i['frame'].iloc[0]/self.frames_second.value != 0 \
else 1/self.frames_second.value
max_value = t_i['frame'].iloc[-1]/self.frames_second.value
frame_range = FloatRange(value=[min_value, max_value],
min=min_value,
max=max_value,
step=1/self.frames_second.value,
description='Time Range',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f')
threshold_mean = widgets.BoundedIntText(
value=self.optimal_parameter_set[0],
min=0,
max=len(self.frames),
step=1,
description='# Frames',
disabled=False,
continuous_update=True)
smooth_cycles = widgets.BoundedIntText(
value=self.optimal_parameter_set[1],
min=0,
max=10,
step=1,
description='# Smooth',
disabled=False,
continuous_update=True)
acceleration_threshold = widgets.BoundedIntText(
value=self.optimal_parameter_set[2],
min=0,
max=1000,
step=1,
description='Acc. Thrhld',
disabled=False,
continuous_update=False)
self.individual_controllers_box.children = [self.trajectories_id,
frame_range,
threshold_mean,
smooth_cycles,
acceleration_threshold]
frame_range.observe(handler=self.print_individual_characterization, type='change', names='value')
if self.interface.selected_index == 4:
particle = b['new'] # original [b['new']]
self.vel, self.acc_vel = self.calculate_vel(self.t1, particle)
# smooth 0 times.
smooth = int(self.optimal_parameter_set[1])
n_frames = int(self.optimal_parameter_set[0])
if smooth != 0 and n_frames != 0:
self.av_vel = self.calculate_average_vel(self.vel, threshold=n_frames)
for x in range(0, smooth - 1):
self.av_vel = self.calculate_average_vel(self.av_vel, threshold=n_frames)
else:
self.av_vel = self.vel
# calculate acceleration from smoothed data.
self.av_acc_vel = self.calculate_av_acc(self.av_vel)
c = {'new': [min_value, max_value]}
self.get_peaks(abs(self.av_acc_vel))
self.print_individual_characterization(c)
threshold_mean.time_range = c
threshold_mean.particle = particle
smooth_cycles.time_range = c
smooth_cycles.particle = particle
acceleration_threshold.time_range = c
acceleration_threshold.particle = particle
threshold_mean.observe(handler=self.update_average_vel, type='change', names='value')
smooth_cycles.observe(handler=self.update_average_vel, type='change', names='value')
acceleration_threshold.observe(handler=self.update_average_vel, type='change', names='value')
def update_average_vel(self, b):
particle = b['owner'].particle
self.vel, self.acc_vel = self.calculate_vel(self.t1, particle)
if b['owner'].description == '# Frames':
threshold = b['new']
smooth_freq = self.individual_controllers_box.children[3].value
mph = self.individual_controllers_box.children[4].value
if b['owner'].description == '# Smooth':
smooth_freq = b['new']
threshold = self.individual_controllers_box.children[2].value
mph = self.individual_controllers_box.children[4].value
if b['owner'].description == 'Acc. Thrhld':
smooth_freq = self.individual_controllers_box.children[3].value
threshold = self.individual_controllers_box.children[2].value
mph = b['new']
if smooth_freq != 0 and threshold != 0:
self.av_vel = self.calculate_average_vel(self.vel, threshold=threshold)
for x in range(0, smooth_freq - 1):
self.av_vel = self.calculate_average_vel(self.av_vel, threshold=threshold)
else:
self.av_vel = self.vel
self.av_acc_vel = self.calculate_av_acc(self.av_vel) # or calculate_av_acc
c = b['owner'].time_range
self.get_peaks(abs(self.av_acc_vel), mph=mph)
self.print_individual_characterization(c)
def print_individual_characterization(self, b):
# target: self.individual_metrix_box
# actual trajectory is contained in self.trajectories_id.value
# x = self.t1.set_index(['frame', 'particle'])['x'].unstack()
# y = self.t1.set_index(['frame', 'particle'])['y'].unstack()
# vel, angular = self.calculate_vel(x, y)
time_frame = b['new']
min_val = time_frame[0] if time_frame[0] != 0 else 1/self.frames_second.value
max_val = time_frame[1]
particle = self.trajectories_id.value
self.fig_individual, ((self.ax1_ind, self.ax2_ind),
(self.ax3_ind, self.ax4_ind)) = plt.subplots(2, 2,
figsize=[15, 10],
sharex='all')
# instantaneous velocities
ax = self.ax1_ind
ax.plot(self.vel.set_index([self.vel.index.values/self.frames_second.value]).loc[min_val:max_val], color='blue');
ax.set_ylabel('Linear Velocity, micron/s', color='blue')
ax.tick_params('y', colors='blue')
ax.grid(color='grey', linestyle='--', linewidth=0.5)
# instantaneous accelerations
ax3 = self.ax3_ind
ax3.plot(self.acc_vel.set_index([self.acc_vel.index.values/self.frames_second.value]).loc[min_val:max_val],
color='grey');
ax3.set_ylabel('Acceleration, micron/s/s', color='grey')
ax3.tick_params('y', colors='grey')
ax3.grid(color='grey', linestyle='--', linewidth=0.5)
# Average Velocities
ax5 = self.ax2_ind
ax5.plot(self.av_vel.set_index([self.av_vel.index.values/self.frames_second.value]).loc[min_val:max_val], color='blue');
ax5.tick_params('y', colors='blue')
ax6 = ax5.twinx() # instantiate a second axes that shares the same x-axis
ax6.plot(abs(self.av_acc_vel.set_index([self.av_acc_vel.index.values / self.frames_second.value]).loc[min_val:max_val]),
color='grey', alpha=0.5);
ax6.set_ylabel('Absolute Acceleration, micron/s/s', color='grey')
ax6.tick_params('y', colors='grey')
ax6.grid(color='grey', linewidth=0.5)
t = self.peaks_table.set_index(self.peaks_table.index.values / self.frames_second.value).loc[min_val:max_val]
ax6.scatter(t.index.values, t.values,marker='*', s=300, alpha=0.5, c='grey')
ax6.grid(color='grey', linestyle='--', linewidth=0.5)
try:
val = self.individual_controllers_box.children[4].value
ax6.plot([min_val, max_val], [val, val], linewidth=2, color='black')
except:
ax6.plot([min_val, max_val], [self.peak_height, self.peak_height], linewidth=2, color='black')
# Average Accelerations
ax7 = self.ax4_ind
ax7.plot(self.av_acc_vel.set_index([self.av_acc_vel.index.values/self.frames_second.value]).loc[min_val:max_val],
color='grey');
ax7.plot([min_val + 1/self.frames_second.value, max_val], [0, 0], color='black', linestyle='--', alpha=0.5)
ax7.tick_params('y', colors='grey')
ax7.grid(color='grey', linestyle='--', linewidth=0.5)
# set title of all four plots
self.ax1_ind.set_title('Instantaneous Velocities')
self.ax2_ind.set_title('Average Velocities')
self.ax3_ind.set_title('Acceleration (Inst. Vel)')
self.ax3_ind.set_xlabel('Time, s')
self.ax4_ind.set_title('Acceleration (Avg. Vel)')
self.ax4_ind.set_xlabel('Time, s')
data_fig = self.easy_print(ax)
plt.close(ax.figure)
self.individual_metrix_box.children = [Image(value=data_fig)]
def calculate_vel(self, data, particleID):
# this function gets a data frame containing information of all particles,
# a desired particle and return velocity and accelereration data frames
particleID = particleID if (isinstance(particleID, int) or isinstance(particleID, list)) else int(particleID)
# get t_i for the desired particle
t_i = data[data['particle'] == particleID]
# get x and y vectors for the desired particle
x = t_i['x']
y = t_i['y']
vel = pd.DataFrame(np.nan, index=t_i.index.values[1:], columns=[particleID])
acc_vel = pd.DataFrame(np.nan, index=t_i.index.values[2:], columns=[particleID])
for frame in x.index.values[1:]:
d = ((x.loc[frame] - x.loc[frame - 1]) ** 2 + (y.loc[frame] - y.loc[frame - 1]) ** 2) ** 0.5
vel.loc[frame] = d * self.frames_second.value / self.pixels_micron.value
if frame > x.index.values[1]:
acc_vel.loc[frame] = (vel.loc[frame] - vel.loc[frame-1]) * self.frames_second.value
return vel, acc_vel
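# Added sketch (illustrative comment, not original code): calculate_vel is a
# per-frame finite difference,
#   v_f = sqrt((x_f - x_{f-1})**2 + (y_f - y_{f-1})**2) * frames_per_second / pixels_per_micron
#   a_f = (v_f - v_{f-1}) * frames_per_second
# e.g. a 3 px displacement between frames at 20.3 frames/s and 6 px/micron gives
# roughly 3 * 20.3 / 6 ~ 10.2 micron/s.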
def calculate_average_vel(self, vel, threshold=4):
average_vel = pd.DataFrame(np.nan, index=vel.index, columns=vel.columns)
ff = vel.index.values[0] # first frame
for frame in vel.index.values:
if frame < ff + threshold-1:
average_vel.loc[frame] = np.mean(vel.loc[:frame].dropna().values)
else:
average_vel.loc[frame] = np.mean(vel.loc[frame-threshold+1:frame].dropna().values)
return average_vel
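# Worked example (added comment): with threshold=4 this is a trailing-window mean.
# For velocities indexed by frames [10..14] with values [2, 4, 6, 8, 10]:
#   frame 10 -> mean([2])           = 2.0   (window still filling)
#   frame 12 -> mean([2, 4, 6])     = 4.0
#   frame 13 -> mean([2, 4, 6, 8])  = 5.0   (first full 4-frame window)
#   frame 14 -> mean([4, 6, 8, 10]) = 7.0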
def prepare_data_print_histogram(self, data, title):
plt.figure()
ax = plt.gca()
if title in ('Linear Velocity', 'Angular Velocity'):
self.refresh_particle_dictionary()
# get a data frame and return Image data with histogram.
# for each column in data, we first:
# 1. get a slice of data corresponding to selected time ranges
# 2. drop the nan values with s.dropnan
# 3. concatenate resulting vector to main vector/list
# 4. calculate histogram. Generate axes ax. and set title
filtered_data = []
for column in data.columns:
# filter to consider only time frame selected for each column
# eliminate dropna and get values
min_value = self.trajectories_dic[column][0] if self.trajectories_dic[column][0] != 0 \
else 1/self.frames_second.value
max_value = self.trajectories_dic[column][1]
z = data[column].loc[min_value*self.frames_second.value:
max_value*self.frames_second.value].dropna().values
filtered_data = np.append(filtered_data, z)
ax.hist(filtered_data)
if title == 'Frequency of Change of Direction':
self.refresh_particle_dictionary()
ax.clear()
filtered_data2 = []
for column in data.columns:
# filter to consider only time frame selected for each column
# eliminate dropna and get values
min_value = self.trajectories_dic[column][0] if self.trajectories_dic[column][0] != 0 \
else 1 / self.frames_second.value
max_value = self.trajectories_dic[column][1]
z = sum(data[column].loc[min_value*self.frames_second.value: max_value*self.frames_second.value] >
self.threshold_angular.value)
time = max_value-min_value
freq = z/time
filtered_data2 = np.append(filtered_data2, freq)
ax.hist(filtered_data2)
# number of change of direction/second use self.threshold_angular
# Extract image information from axes contained in ax and return image data.
ax.set_xlabel(title)
ax.set_ylabel('Frequency')
buf = BytesIO()
canvas = FigureCanvasAgg(ax.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
plt.close(ax.figure)
self.interactive_ranges = True
return data_fig
def refresh_particle_dictionary(self):
trajectories_dic = dict((element[0].value, element[1].children[0].value)
for element in self.ensemble_particle_id_list)
self.trajectories_dic = trajectories_dic
def update_frame_segment(self, b):
self.frames = pims.ImageSequence(b.path.value + '/*.jpg', as_grey=True)
self.box1.children[3].max = len(self.frames)-1
self.box1.children[3].value = [0, len(self.frames)-1]
self.box1.children[3].continuous_update = False
self.box1.children[3].observe(handler=self.reshape_frames, type='change', names='value')
self.box3.controller_time_frame.max = len(self.frames)-1
self.box3.controller_time_frame.value = [0, len(self.frames)-1]
self.box5.frame_ranges.max = len(self.frames)-1
self.box5.frame_ranges.value = [0, len(self.frames)-1]
def reshape_frames(self, b):
self.cut_button.disabled = False
ranges = b['new']
self.cut_button.ranges = ranges
def cut_frames(self, b):
ranges = b.ranges
if ranges[0] != 0:
return
self.frames = self.frames[ranges[0]:ranges[1]+1]
self.box1.children[3].max = len(self.frames) - 1
self.box1.children[3].value = [0, len(self.frames) - 1]
self.box3.controller_time_frame.max = len(self.frames)-1
self.box3.controller_time_frame.value = [0, len(self.frames)-1]
self.box5.frame_ranges.max = len(self.frames)-1
self.box5.frame_ranges.value = [0, len(self.frames)-1]
def easy_print(self, ax):
buf = BytesIO()
canvas = FigureCanvasAgg(ax.figure)
canvas.print_png(buf)
data_fig = buf.getvalue()
return data_fig
def calculate_av_acc(self, vel):
acc_vel = pd.DataFrame(np.nan, index=vel.index.values[1:], columns=vel.columns)
for frame in vel.index.values[1:]:
acc_vel.loc[frame] = (vel.loc[frame] - vel.loc[frame - 1]) * self.frames_second.value
return acc_vel
def get_peaks(self, vel, mph=None):
# get peak indices; fall back to the acceleration-threshold widget when no mph is passed
if mph is None:
mph = self.individual_controllers_box.children[4].value
col = vel.columns
frames = vel[col].dropna().index.values
values = vel[col].dropna().values
data = np.asanyarray([i[0] for i in values])
peakind = detect_peaks(data, mph=mph)
peak_values = np.asanyarray([data[element] for element in peakind])
peak_frame = np.asanyarray([frames[element] for element in peakind])
self.peaks_table = pd.DataFrame(peak_values, index=peak_frame, columns=vel.columns)
self.peak_height = mph
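# Added note (sketch, assuming taxispy.detect_peaks semantics): get_peaks scans the
# smoothed |acceleration| trace and keeps peaks higher than mph (the "Acc. Thrhld"
# widget value); each retained peak frame is treated as one change of direction.
# Typical call, leaving the detected peaks in self.peaks_table indexed by frame number:
#   self.get_peaks(abs(self.av_acc_vel), mph=10)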
def calculate_ensemble(self, b):
b.description = 'Calculating...'
b.disabled = True
self.lock1 = False
# Target: self.box5.children=[legend6, #0
# frames_average, #1
# smooth_cycles, #2
# legend7, #3
# lin_vel_threshold, #4
# acc_threshold, #5
# b_calculate, #6
# results, #7
# ]
# create dataFrame
columns = ['Velocity', #0
'Acceleration', #1
'Mean_vel', #2
'Change_dir', #3
'Duration', #4
'Change_dir_sec', #5
'Displacement', #6
'x', #7
'y', #8
'First_Time', #9
'firstFrame'] #10
# Get indices for unique particles
y = self.t1.set_index(['frame', 'particle'])['y'].unstack()
id_particles = y.columns.values
# create empty data frame that will contain all results.
results = pd.DataFrame(np.nan, index=id_particles, columns=columns)
results.index.name = 'Particle'
# create dictionaries to store vectors for velocity, smoothed velocity and acceleration.
vel_dic = {}
smooth_vel_dic = {}
smooth_acc_dic ={}
for particle in results.index.values:
# calculate velocity
vel, acc = self.calculate_vel(self.t1, particle)
vel_dic.update({particle: vel})
# smooth velocity vector
smooth_cycles = self.box5.children[2].value
n_frames = self.box5.children[1].value
if smooth_cycles != 0 and n_frames != 0:
av_vel = self.calculate_average_vel(vel, threshold=n_frames)
for x in range(0, smooth_cycles - 1):
av_vel = self.calculate_average_vel(av_vel, threshold=n_frames)
else:
av_vel = vel
smooth_vel_dic.update({particle: av_vel})
# calculate acceleration
av_acc = self.calculate_av_acc(av_vel)
smooth_acc_dic.update({particle: av_acc})
# calculate average linear vel for filtering purposes
results.loc[particle, 'Mean_vel'] = np.average(av_vel.values)
# get x and y coordinates.
# results.loc[particle, 'x'], results.loc[particle, 'y'] = self.get_x_y(self.t1, particle)
results.loc[particle, 'x'], \
results.loc[particle, 'y'], \
results.loc[particle, 'First_Time'] = self.get_x_y_time(self.t1, particle)
# get number of change of direction, duration of trajectory and frequency of change of direction
results.loc[particle, 'Change_dir'], \
results.loc[particle, 'Duration'], \
results.loc[particle, 'Change_dir_sec'] = self.get_peaks_data(abs(av_acc), self.box5.acceleration.value)
# calculate Displacement based on three point method.
results.loc[particle, 'Displacement'], \
results.loc[particle, 'firstFrame'] = get_displacement(self.t1, particle)
# At this point, the data frame 'results' contain data for all particles. Now we need to filter for minimum
# linear velocity threshold, minimum Displacement and max number of change of direction.
self.box5.lin_vel_threshold.results = results
self.box5.children[6].children[0].results = results
self.box5.children[6].children[1].results = results
self.box5.frame_ranges.results = results
percentage = self.box5.children[6].children[0].value
filtered_results = results[results['Mean_vel'] > self.box5.lin_vel_threshold.value]
filtered_results = filtered_results[filtered_results['Displacement'] >=
results['Displacement'].max() * percentage / 100]
filtered_results = filtered_results[filtered_results['Change_dir'] <= self.box5.children[6].children[1].value]
self.box5.lin_vel_threshold.observe(handler=self.update_results, type='change', names='value')
self.box5.children[6].children[0].observe(handler=self.update_results, type='change', names='value')
self.box5.children[6].children[1].observe(handler=self.update_results, type='change', names='value')
self.box5.frame_ranges.observe(handler=self.update_results, type='change', names='value')
# now the next step is to print results to trajectories figure and histograms.
self.print_ensemble_results(filtered_results)
self.excel_unfiltered = results
self.excel_filtered = filtered_results
# call function to generate adaptation curve
if self.box5.children[8].value != 0:
self.calculate_adaptation_time_ensemble(filtered_results, self.box5.children[8], self.box5.children[9])
# attach filtered results to calculate button.
self.box5.children[10].results = filtered_results
# observe changes in "change of direction". self.box5.children[8] and "time interval" self.box5.children[7]
self.box5.children[9].observe(handler=self.update_adaptation_ensemble, names='value') # updated
self.box5.children[8].observe(handler=self.update_adaptation_ensemble, names='value') # updated
#populate self.box5.children[13] which shows settings used to calculate adaptation curve.
self.display_adaptation_options()
self.box5.children[13].children[1].children[1].results = results
self.box5.children[13].children[1].children[1].observe(handler=self.update_results, type='change', names='value')
self.display_adaptation_data()
b.description = 'Calculate'
b.disabled = False
def display_adaptation_data(self):
button = Button(description='Show Data')
button.on_click(self.print_adaptation_data)
data_table = HTML()
tumbling_range_window = VBox()
label1 = HTML(value='Calculate tumbling frequency (1/s) for a given time frame:')
max_value = round(len(self.frames) / self.frames_second.value, 2)
time_range = widgets.FloatRangeSlider(value=[0, max_value], min=0, max=max_value, step=0.1,
description='T. Frame (s):', disabled=False, continuous_update=False,
orientation='horizontal', readout=True, readout_format='.1f',)
label2 = HTML()
label2.value = 'The tumbling frequency is...'
tumbling_range_window.children = [label1, HBox(children=[time_range, label2])]
self.box5.children[14].children = [button, data_table, tumbling_range_window]
time_range.observe(handler=self.calculate_tumbling_frequency_time_frame, names='value')
self.calculate_tumbling_frequency_time_frame(time_range)
def calculate_tumbling_frequency_time_frame(self, b):
min_time = self.box5.children[14].children[2].children[1].children[0].value[0]
max_time = self.box5.children[14].children[2].children[1].children[0].value[1]
data = self.excel_filtered
tumbling_freq, _, _, number = self.slice_data_frame(data, min_time, max_time)
self.box5.children[14].children[2].children[1].children[1].value = 'The tumbling frequency is ' \
+ str(round(tumbling_freq,2)) + \
' 1/s. Number of trajectories is ' + \
str(number)
def print_adaptation_data(self, b):
data = self.adaptation_curve_data
if b.description == 'Show Data':
s = generate_adaptation_string(data)
self.box5.children[14].children[1].value = s
self.box5.children[14].children[0].description = 'Hide Data'
else:
self.box5.children[14].children[1].value = ''
self.box5.children[14].children[0].description = 'Show Data'
def display_adaptation_options(self):
string1 = 'Adaptation curves calculated using single trajectories: '
checkbox1 = Checkbox(value=self.single_trajectories)
string2 = 'Show ID: '
checkbox2 = Checkbox(value=self.show_id)
checkbox2.is_show_id_checkbox = True
smooth_adaptation = Button(description='Smooth Adptn Crv')
smooth_adaptation.on_click(self.display_adaptation_smooth_options)
smooth_options = VBox()
smooth_options.content = [widgets.BoundedIntText(description='# Points:', value=2, step=1, min=0), # 0
widgets.BoundedIntText(description='# Smooth:', value=0, step=1, min=0) # 1
]
smooth_adaptation.options_box = smooth_options
self.box5.children[13].children = [HBox(children=[Label(value=string1), checkbox1]), # 0
HBox(children=[Label(value=string2), checkbox2]), # 1
smooth_adaptation, # 2
smooth_options, # 3
]
checkbox1.observe(handler=self.recalculate_adaptation_ensemble, names='value')
smooth_options.content[0].observe(handler=self.smooth_adaptation_curve, names='value')
smooth_options.content[1].observe(handler=self.smooth_adaptation_curve, names='value')
def smooth_adaptation_curve(self, b):
# modify self.adaptation_curve_smooth_data
data = self.adaptation_curve_data
n_points = self.box5.children[13].children[3].children[0].value
n_smooth = self.box5.children[13].children[3].children[1].value
smooth_data = pd.DataFrame(index=data.index, columns=data.columns)
if n_smooth == 0 or n_points == 0:
self.adaptation_curve_smooth_data = None
if n_points != 0:
for x in range(n_smooth):
counter = 0
data = self.adaptation_curve_data if x == 0 else smooth_data
for _ in self.adaptation_curve_data.index.values:
if counter <= n_points:
smooth_data['Frequency'].iloc[counter] = np.mean(data['Frequency'].iloc[:counter+1].dropna().values)
else:
smooth_data['Frequency'].iloc[counter] = \
np.mean(data['Frequency'].iloc[counter - n_points + 1:counter+1].dropna().values)
counter += 1
self.adaptation_curve_smooth_data = smooth_data
# Then call the plotting functions.
self.calculate_adaptation_time_ensemble(self.box5.children[10].results,
self.box5.children[8],
self.box5.children[9])
self.update_adaptation_curve_table()
def display_adaptation_smooth_options(self, b):
if b.description == 'Smooth Adptn Crv':
b.options_box.children = b.options_box.content
b.description = 'Hide'
else:
b.description = 'Smooth Adptn Crv'
b.options_box.children = []
def update_adaptation_ensemble(self, b):
# check that b.new is different from 0.
#if b['new'] != 0:
if self.adaptation_curve_smooth_data is None:
self.calculate_adaptation_time_ensemble(self.box5.children[10].results,
self.box5.children[8],
self.box5.children[9])
self.update_adaptation_curve_table()
else:
self.adaptation_curve_smooth_data = None
self.calculate_adaptation_time_ensemble(self.box5.children[10].results,
self.box5.children[8],
self.box5.children[9])
self.smooth_adaptation_curve(None)
def recalculate_adaptation_ensemble(self, b):
self.single_trajectories = b['new']
self.calculate_adaptation_time_ensemble(self.box5.children[10].results,
self.box5.children[8],
self.box5.children[9])
self.calculate_tumbling_frequency_time_frame(None)
self.update_adaptation_curve_table()
def update_adaptation_curve_table(self):
if self.box5.children[14].children[0].description == 'Hide Data':
if self.adaptation_curve_smooth_data is None:
self.box5.children[14].children[1].value = generate_adaptation_string(self.adaptation_curve_data,
smooth=False)
else:
self.box5.children[14].children[1].value = generate_adaptation_string(self.adaptation_curve_smooth_data,
smooth=True)
def calculate_adaptation_time_ensemble(self, data, time_interval, change_dir_thrshld):
# define loop using time_interval & total time
# slice dataFrame results to consider only particles in a certain interval
# calculate mean, min and max and assign to data frame
# generate plot
time_interval = time_interval.value
change_dir_thrshld = change_dir_thrshld.value
# construct a dataFrame to save results.
columns = ['Frequency', # 0
'min', # 1
'max', # 2
'Number'] # 3
# create empty data frame that will contain all results.
index_array = np.arange(time_interval,
len(self.frames) / self.frames_second.value + time_interval,
time_interval)
index_array = np.round(index_array, 3)
results =
|
pd.DataFrame(np.nan, index=index_array, columns=columns)
|
pandas.DataFrame
|
import pandas as pd
from ai4netmon.Analysis.bias import generate_distribution_plots as gdp
from matplotlib import pyplot as plt
from matplotlib import cm, colors
AGGREGATE_DATA_FNAME = 'https://raw.githubusercontent.com/sermpezis/ai4netmon/dev/data/aggregate_data/asn_aggregate_data_20211201.csv'
BIAS_DF = './data/bias_df__no_stubs.csv'
IMPROVEMENTS = '../../data/misc/improvements20210601.txt'
SAVE_CSV_FNAME = './data/df_bias_vs_improvement_detailed.csv'
RENAME_COLUMNS = True
## load network data
df =
|
pd.read_csv(AGGREGATE_DATA_FNAME, header=0, index_col=0)
|
pandas.read_csv
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: datetime.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
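# Usage sketch (added comment; 'header.csv' is a hypothetical path): all three
# methods return the CSV column names plus a dtype map of pandas.StringDtype().
# Note that method 1 treats the first column as the index and therefore omits it:
#   fieldnames, fieldDict = TransformMetaData().getHeaderFromFile('header.csv', method=3)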
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
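# Added sketch of the output (illustrative): frameToINI writes one option per
# DataFrame column under sectionName, each rendered as a quoted list, e.g.:
#   [Drive0]
#   serial_number = ['S1', 'S2', 'S3']
# (ConfigParser lower-cases option names by default via optionxform.)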
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean using a multiplication (weighted-average) method, since direct division can produce an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation using a multiplication-based method, since division can produce an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(0.0) # accumulator for the squared deviations
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
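# Worked example (added comment): for data = [1, 2, 3] the accumulated squared
# deviations are (1-2)^2 + (2-2)^2 + (3-2)^2 = 2, and with n - 1 = 2 degrees of
# freedom the sample standard deviation is sqrt(2 / 2) = 1.0.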
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
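# Added note (sketch): after the stats pass, the cleaning keeps rows whose column
# value is >= 1 and below topValue = mean + multiplierSigma * sigma from
# _determineQuickStats, i.e. non-positive cycle counts and far-outlier samples are dropped.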
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1": pandas.StringDtype(),
"bitErrBucketArray2": pandas.StringDtype(),
"bitErrBucketArray3": pandas.StringDtype(),
"bitErrBucketArray4": pandas.StringDtype(),
"bitErrBucketArray5": pandas.StringDtype(),
"bitErrBucketArray6": pandas.StringDtype(),
"bitErrBucketArray7": pandas.StringDtype(),
"bitErrBucketArray8": pandas.StringDtype(),
"bitErrBucketArray9": pandas.StringDtype(),
"bitErrBucketArray10": pandas.StringDtype(),
"bitErrBucketArray11": pandas.StringDtype(),
"bitErrBucketArray12": pandas.StringDtype(),
"bitErrBucketArray13": pandas.StringDtype(),
"bitErrBucketArray14": pandas.StringDtype(),
"bitErrBucketArray15": pandas.StringDtype(),
"bitErrBucketArray16": pandas.StringDtype(),
"bitErrBucketArray17": pandas.StringDtype(),
"bitErrBucketArray18": pandas.StringDtype(),
"bitErrBucketArray19": pandas.StringDtype(),
"bitErrBucketArray20": pandas.StringDtype(),
"bitErrBucketArray21": pandas.StringDtype(),
"bitErrBucketArray22": pandas.StringDtype(),
"bitErrBucketArray23": pandas.StringDtype(),
"bitErrBucketArray24": pandas.StringDtype(),
"bitErrBucketArray25": pandas.StringDtype(),
"bitErrBucketArray26": pandas.StringDtype(),
"bitErrBucketArray27": pandas.StringDtype(),
"bitErrBucketArray28": pandas.StringDtype(),
"bitErrBucketArray29": pandas.StringDtype(),
"bitErrBucketArray30": pandas.StringDtype(),
"bitErrBucketArray31": pandas.StringDtype(),
"bitErrBucketArray32": pandas.StringDtype(),
"bitErrBucketArray33": pandas.StringDtype(),
"bitErrBucketArray34": pandas.StringDtype(),
"bitErrBucketArray35": pandas.StringDtype(),
"bitErrBucketArray36": pandas.StringDtype(),
"bitErrBucketArray37": pandas.StringDtype(),
"bitErrBucketArray38": pandas.StringDtype(),
"bitErrBucketArray39": pandas.StringDtype(),
"bitErrBucketArray40": pandas.StringDtype(),
"bitErrBucketArray41": pandas.StringDtype(),
"bitErrBucketArray42": pandas.StringDtype(),
"bitErrBucketArray43": pandas.StringDtype(),
"bitErrBucketArray44": pandas.StringDtype(),
"bitErrBucketArray45": pandas.StringDtype(),
"bitErrBucketArray46": pandas.StringDtype(),
"bitErrBucketArray47": pandas.StringDtype(),
"bitErrBucketArray48": pandas.StringDtype(),
"bitErrBucketArray49": pandas.StringDtype(),
"bitErrBucketArray50": pandas.StringDtype(),
"bitErrBucketArray51": pandas.StringDtype(),
"bitErrBucketArray52": pandas.StringDtype(),
"bitErrBucketArray53": pandas.StringDtype(),
"bitErrBucketArray54": pandas.StringDtype(),
"bitErrBucketArray55": pandas.StringDtype(),
"bitErrBucketArray56": pandas.StringDtype(),
"bitErrBucketArray57": pandas.StringDtype(),
"bitErrBucketArray58": pandas.StringDtype(),
"bitErrBucketArray59": pandas.StringDtype(),
"bitErrBucketArray60": pandas.StringDtype(),
"bitErrBucketArray61": pandas.StringDtype(),
"bitErrBucketArray62": pandas.StringDtype(),
"bitErrBucketArray63": pandas.StringDtype(),
"bitErrBucketArray64": pandas.StringDtype(),
"bitErrBucketArray65": pandas.StringDtype(),
"bitErrBucketArray66": pandas.StringDtype(),
"bitErrBucketArray67": pandas.StringDtype(),
"bitErrBucketArray68": pandas.StringDtype(),
"bitErrBucketArray69": pandas.StringDtype(),
"bitErrBucketArray70": pandas.StringDtype(),
"bitErrBucketArray71": pandas.StringDtype(),
"bitErrBucketArray72": pandas.StringDtype(),
"bitErrBucketArray73": pandas.StringDtype(),
"bitErrBucketArray74": pandas.StringDtype(),
"bitErrBucketArray75": pandas.StringDtype(),
"bitErrBucketArray76": pandas.StringDtype(),
"bitErrBucketArray77": pandas.StringDtype(),
"bitErrBucketArray78": pandas.StringDtype(),
"bitErrBucketArray79": pandas.StringDtype(),
"bitErrBucketArray80": pandas.StringDtype(),
"mrr_successDistribution1": pandas.StringDtype(),
"mrr_successDistribution2": pandas.StringDtype(),
"mrr_successDistribution3": pandas.StringDtype(),
"mrr_successDistribution4": pandas.StringDtype(),
"mrr_successDistribution5": pandas.StringDtype(),
"mrr_successDistribution6": pandas.StringDtype(),
"mrr_successDistribution7": pandas.StringDtype(),
"mrr_successDistribution8": pandas.StringDtype(),
"mrr_successDistribution9": pandas.StringDtype(),
"mrr_successDistribution10": pandas.StringDtype(),
"mrr_successDistribution11": pandas.StringDtype(),
"mrr_successDistribution12": pandas.StringDtype(),
"mrr_successDistribution13": pandas.StringDtype(),
"mrr_successDistribution14": pandas.StringDtype(),
"mrr_successDistribution15": pandas.StringDtype(),
"mrr_successDistribution16": pandas.StringDtype(),
"mrr_successDistribution17": pandas.StringDtype(),
"mrr_successDistribution18": pandas.StringDtype(),
"mrr_successDistribution19": pandas.StringDtype(),
"mrr_successDistribution20": pandas.StringDtype(),
"mrr_successDistribution21": pandas.StringDtype(),
"mrr_successDistribution22": pandas.StringDtype(),
"mrr_successDistribution23": pandas.StringDtype(),
"mrr_successDistribution24": pandas.StringDtype(),
"mrr_successDistribution25": pandas.StringDtype(),
"mrr_successDistribution26": pandas.StringDtype(),
"mrr_successDistribution27": pandas.StringDtype(),
"mrr_successDistribution28": pandas.StringDtype(),
"mrr_successDistribution29": pandas.StringDtype(),
"mrr_successDistribution30": pandas.StringDtype(),
"mrr_successDistribution31": pandas.StringDtype(),
"mrr_successDistribution32": pandas.StringDtype(),
"mrr_successDistribution33": pandas.StringDtype(),
"mrr_successDistribution34": pandas.StringDtype(),
"mrr_successDistribution35": pandas.StringDtype(),
"mrr_successDistribution36": pandas.StringDtype(),
"mrr_successDistribution37": pandas.StringDtype(),
"mrr_successDistribution38": pandas.StringDtype(),
"mrr_successDistribution39": pandas.StringDtype(),
"mrr_successDistribution40": pandas.StringDtype(),
"mrr_successDistribution41": pandas.StringDtype(),
"mrr_successDistribution42": pandas.StringDtype(),
"mrr_successDistribution43": pandas.StringDtype(),
"mrr_successDistribution44": pandas.StringDtype(),
"mrr_successDistribution45": pandas.StringDtype(),
"mrr_successDistribution46": pandas.StringDtype(),
"mrr_successDistribution47": pandas.StringDtype(),
"mrr_successDistribution48": pandas.StringDtype(),
"mrr_successDistribution49": pandas.StringDtype(),
"mrr_successDistribution50": pandas.StringDtype(),
"mrr_successDistribution51": pandas.StringDtype(),
"mrr_successDistribution52": pandas.StringDtype(),
"mrr_successDistribution53": pandas.StringDtype(),
"mrr_successDistribution54": pandas.StringDtype(),
"mrr_successDistribution55": pandas.StringDtype(),
"mrr_successDistribution56": pandas.StringDtype(),
"mrr_successDistribution57": pandas.StringDtype(),
"mrr_successDistribution58": pandas.StringDtype(),
"mrr_successDistribution59": pandas.StringDtype(),
"mrr_successDistribution60": pandas.StringDtype(),
"mrr_successDistribution61": pandas.StringDtype(),
"mrr_successDistribution62": pandas.StringDtype(),
"mrr_successDistribution63": pandas.StringDtype(),
"mrr_successDistribution64": pandas.StringDtype(),
"blDowngradeCount": pandas.StringDtype(),
"snapReads": pandas.StringDtype(),
"pliCapTestTime": pandas.StringDtype(),
"currentTimeToFreeSpaceRecovery": pandas.StringDtype(),
"worstTimeToFreeSpaceRecovery": pandas.StringDtype(),
"rspnandReads": pandas.StringDtype(),
"cachednandReads": pandas.StringDtype(),
"spnandReads": pandas.StringDtype(),
"dpnandReads": pandas.StringDtype(),
"qpnandReads": pandas.StringDtype(),
"verifynandReads": pandas.StringDtype(),
"softnandReads": pandas.StringDtype(),
"spnandWrites": pandas.StringDtype(),
"dpnandWrites": pandas.StringDtype(),
"qpnandWrites": pandas.StringDtype(),
"opnandWrites": pandas.StringDtype(),
"xpnandWrites": pandas.StringDtype(),
"unalignedHostWriteCmd": pandas.StringDtype(),
"randomReadCmd": pandas.StringDtype(),
"randomWriteCmd": pandas.StringDtype(),
"secVenCmdCount": pandas.StringDtype(),
"secVenCmdCountFails": pandas.StringDtype(),
"mrrFailOnSlcOtfPages": pandas.StringDtype(),
"mrrFailOnSlcOtfPageMarkedAsMBPD": pandas.StringDtype(),
"lcorParitySeedErrors": pandas.StringDtype(),
"fwDownloadFails": pandas.StringDtype(),
"fwAuthenticationFails": pandas.StringDtype(),
"fwSecurityRev": pandas.StringDtype(),
"isCapacitorHealthly": pandas.StringDtype(),
"fwWRCounter": pandas.StringDtype(),
"sysAreaEraseFailCount": pandas.StringDtype(),
"iusDefragRelocated4DataRetention": pandas.StringDtype(),
"I2CTemp": pandas.StringDtype(),
"lbaMismatchOnNandReads": pandas.StringDtype(),
"currentWriteStreamsCount": pandas.StringDtype(),
"nandWritesPerStream1": pandas.StringDtype(),
"nandWritesPerStream2": pandas.StringDtype(),
"nandWritesPerStream3": pandas.StringDtype(),
"nandWritesPerStream4": pandas.StringDtype(),
"nandWritesPerStream5": pandas.StringDtype(),
"nandWritesPerStream6": pandas.StringDtype(),
"nandWritesPerStream7": pandas.StringDtype(),
"nandWritesPerStream8": pandas.StringDtype(),
"nandWritesPerStream9": pandas.StringDtype(),
"nandWritesPerStream10": pandas.StringDtype(),
"nandWritesPerStream11": pandas.StringDtype(),
"nandWritesPerStream12": pandas.StringDtype(),
"nandWritesPerStream13": pandas.StringDtype(),
"nandWritesPerStream14": pandas.StringDtype(),
"nandWritesPerStream15": pandas.StringDtype(),
"nandWritesPerStream16": pandas.StringDtype(),
"nandWritesPerStream17": pandas.StringDtype(),
"nandWritesPerStream18": pandas.StringDtype(),
"nandWritesPerStream19": pandas.StringDtype(),
"nandWritesPerStream20": pandas.StringDtype(),
"nandWritesPerStream21": pandas.StringDtype(),
"nandWritesPerStream22": pandas.StringDtype(),
"nandWritesPerStream23": pandas.StringDtype(),
"nandWritesPerStream24": pandas.StringDtype(),
"nandWritesPerStream25": pandas.StringDtype(),
"nandWritesPerStream26": pandas.StringDtype(),
"nandWritesPerStream27": pandas.StringDtype(),
"nandWritesPerStream28": pandas.StringDtype(),
"nandWritesPerStream29": pandas.StringDtype(),
"nandWritesPerStream30": pandas.StringDtype(),
"nandWritesPerStream31": pandas.StringDtype(),
"nandWritesPerStream32": pandas.StringDtype(),
"hostSoftReadSuccess": pandas.StringDtype(),
"xorInvokedCount": pandas.StringDtype(),
"comresets": pandas.StringDtype(),
"syncEscapes": pandas.StringDtype(),
"rErrHost": pandas.StringDtype(),
"rErrDevice": pandas.StringDtype(),
"iCrcs": pandas.StringDtype(),
"linkSpeedDrops": pandas.StringDtype(),
"mrrXtrapageEvents": pandas.StringDtype(),
"mrrToppageEvents": pandas.StringDtype(),
"hostXorSuccessCount": pandas.StringDtype(),
"hostXorFailCount": pandas.StringDtype(),
"nandWritesWithPreReadPerStream1": pandas.StringDtype(),
"nandWritesWithPreReadPerStream2": pandas.StringDtype(),
"nandWritesWithPreReadPerStream3": pandas.StringDtype(),
"nandWritesWithPreReadPerStream4": pandas.StringDtype(),
"nandWritesWithPreReadPerStream5": pandas.StringDtype(),
"nandWritesWithPreReadPerStream6": pandas.StringDtype(),
"nandWritesWithPreReadPerStream7": pandas.StringDtype(),
"nandWritesWithPreReadPerStream8": pandas.StringDtype(),
"nandWritesWithPreReadPerStream9": pandas.StringDtype(),
"nandWritesWithPreReadPerStream10": pandas.StringDtype(),
"nandWritesWithPreReadPerStream11": pandas.StringDtype(),
"nandWritesWithPreReadPerStream12": pandas.StringDtype(),
"nandWritesWithPreReadPerStream13": pandas.StringDtype(),
"nandWritesWithPreReadPerStream14": pandas.StringDtype(),
"nandWritesWithPreReadPerStream15": pandas.StringDtype(),
"nandWritesWithPreReadPerStream16": pandas.StringDtype(),
"nandWritesWithPreReadPerStream17": pandas.StringDtype(),
"nandWritesWithPreReadPerStream18": pandas.StringDtype(),
"nandWritesWithPreReadPerStream19": pandas.StringDtype(),
"nandWritesWithPreReadPerStream20": pandas.StringDtype(),
"nandWritesWithPreReadPerStream21": pandas.StringDtype(),
"nandWritesWithPreReadPerStream22": pandas.StringDtype(),
"nandWritesWithPreReadPerStream23": pandas.StringDtype(),
"nandWritesWithPreReadPerStream24": pandas.StringDtype(),
"nandWritesWithPreReadPerStream25": pandas.StringDtype(),
"nandWritesWithPreReadPerStream26": pandas.StringDtype(),
"nandWritesWithPreReadPerStream27": pandas.StringDtype(),
"nandWritesWithPreReadPerStream28": pandas.StringDtype(),
"nandWritesWithPreReadPerStream29": pandas.StringDtype(),
"nandWritesWithPreReadPerStream30": pandas.StringDtype(),
"nandWritesWithPreReadPerStream31": pandas.StringDtype(),
"nandWritesWithPreReadPerStream32": pandas.StringDtype(),
"dramCorrectables8to1": pandas.StringDtype(),
"driveRecoveryCount":
|
pandas.StringDtype()
|
pandas.StringDtype
|
import sys
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
#import os
#from matplotlib.dates import DateFormatter, MinuteLocator
#from matplotlib import dates
import datetime as DT
from . import tools as MT
from . import spherical_geometry as M_geo
from . import general as M
import imp
import matplotlib.dates as dates
import os
import warnings
import copy
class ID_tracker(object):
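    # Maintains a dot-separated identifier string; add_front/add_back prepend or
    # append a token and can optionally stamp the current date and time.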
import datetime as datetime
def __init__(self, s, date=None):
self.string=s
#return self.string
def add_front(self, s, date=None):
if date:
now = self.datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
self.string=s+'.'+self.string+'_'+now
else:
self.string=s+'.'+self.string
        return self.string
def add_back(self, s, date=None):
if date:
now = self.datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")
self.string=s+'.'+self.string+'_'+now
else:
self.string=self.string+'.'+s
        return self.string
def add(self, s, date=None):
self.add_front(s,date=date)
class plot_time_chunks_python2(object):
def __init__(self, time, f, data, pos, ax=None, fig=None, **kwargs):
#pos=[ 30, 60, 90]
self.f=f
self.data=data
self.time=time
self.pos=pos
#if isinstance(time[0], int):
# self.time_sec=time
#elif isinstance(time[0], np.datetime64):
# self.time_sec=MT.datetime64_to_sec(time)
# #start_date=str(np.datetime64(t).astype('M8[s]'))
#else:
# raise ValueError("unknown pos type")
#if isinstance(pos[0], int):
# self.pos=pos
#elif isinstance(pos[0], np.datetime64):
# print('print convert timeto sec')
# self.pos=MT.datetime64_to_sec(pos)
# #dates.date2num(time.astype(DT.datetime))
# #start_date=str(np.datetime64(t).astype('M8[s]'))
#else:
# raise ValueError("unknown pos type")
#print(self.time )
#print(self.data.shape)
#print('pos',self.pos)
if type(self.pos[0]) is tuple:
self.time_chu=data_chunks(self.time,self.pos, 0 )
self.data_chu=data_chunks(self.data,self.pos, 0 )
else:
self.time_chu=data_chunks_split(self.time,self.pos, 0 )
self.data_chu=data_chunks_split(self.data,self.pos, 0 )
#print(len(self.time_chu))
#print(len(self.data_chu))
#print(len(self.pos), self.pos[0])
self.Drawer=self.draw_next(**kwargs)
#ax=self.Drawer.next()
#contourfdata=plt.contourf(time_chu.next(),f,data_chu.next().T )
if ax is None:
self.ax=plt.gca()
else:
self.ax=ax
if fig is None:
self.fig=plt.gcf()
else:
self.fig=fig
#plt.show()
plt.ion()
self.cbarflag=True
def draw_next(self, **kwargs):
for i in range(len(self.pos)):
print(i)
#plt.show()
yield self.draw_fig(self.time_chu.next(), self.f, self.data_chu.next(), **kwargs)
#plt.close()
def draw_fig(self, time, f, data,clevs,ylim=None ,cmap=None, **kwargs):
import matplotlib.colors as colors
self.ax.clear()
time_local=time#time_chu.next()
data_local=data#data_chu.next()
print('time', time_local.shape)
print('data', data_local.shape)
#Figure=M.plot_periodogram(time_local,f[:],data_local[:,:], **kwargs)
#fig=plt.gcf()
#M.clevels(data_local[:,:], )
#Figure.imshow(shading=True, downscale_fac=None, anomalie=False,ax=(self.ax,self.fig), cbar=self.cbarflag)
#Figure.set_xaxis_to_days(int1=1, int2=2)
#Figure.ax.set_yscale("linear", nonposy='clip')
self.clevs=clevs
cmap=plt.cm.PuBuGn if cmap is None else cmap
shading='gouraud'
norm = colors.BoundaryNorm(boundaries=self.clevs, ncolors=256)
#self.cs=plt.contourf(time_local,f,data_local.T,self.clevs, **kwargs)
self.cs=plt.pcolormesh(time_local,f,data_local.T,cmap=cmap , norm=norm, shading=shading)
#self.ax.set_yscale("log", nonposy='clip')
if self.cbarflag is True:
self.cbar= plt.colorbar(self.cs,pad=0.01)#, Location='right')#
self.cbar.ax.aspect=100
self.cbar.outline.set_linewidth(0)
#self.cbar.set_label('Power db(' + self.data_unit + '^2/f ')
if ylim is not None:
self.ax.set_ylim(ylim[0], ylim[1])
#self.ax.set_xticklabels(time_local.astype('M8[D]')[trange][::6], minor=False)
#drawnow(draw_fig)
#draw_fig()# The drawnow(makeFig) command can be replaced
plt.draw()
self.cbarflag=False
#self.ax=Figure.ax
return self.ax
class plot_time_chunks(object):
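    # Plots time-frequency data chunk by chunk: the arrays are split at the
    # positions in `pos`, and draw_next() yields one pcolormesh panel per chunk.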
def __init__(self, time, f, data, pos, ax=None, fig=None, **kwargs):
#pos=[ 30, 60, 90]
self.f=f
self.data=data
self.time=time
self.pos=pos
#if isinstance(time[0], int):
# self.time_sec=time
#elif isinstance(time[0], np.datetime64):
# self.time_sec=MT.datetime64_to_sec(time)
# #start_date=str(np.datetime64(t).astype('M8[s]'))
#else:
# raise ValueError("unknown pos type")
#if isinstance(pos[0], int):
# self.pos=pos
#elif isinstance(pos[0], np.datetime64):
# print('print convert timeto sec')
# self.pos=MT.datetime64_to_sec(pos)
# #dates.date2num(time.astype(DT.datetime))
# #start_date=str(np.datetime64(t).astype('M8[s]'))
#else:
# raise ValueError("unknown pos type")
#print(self.time )
#print(self.data.shape)
#print('pos',self.pos)
if type(self.pos[0]) is tuple:
self.time_chu=data_chunks(self.time,self.pos, 0 )
self.data_chu=data_chunks(self.data,self.pos, 0 )
else:
self.time_chu=data_chunks_split(self.time,self.pos, 0 )
self.data_chu=data_chunks_split(self.data,self.pos, 0 )
#print(len(self.time_chu))
#print(len(self.data_chu))
#print(len(self.pos), self.pos[0])
self.Drawer=self.draw_next(**kwargs)
#ax=self.Drawer.next()
#contourfdata=plt.contourf(time_chu.next(),f,data_chu.next().T )
if ax is None:
self.ax=plt.gca()
else:
self.ax=ax
if fig is None:
self.fig=plt.gcf()
else:
self.fig=fig
#plt.show()
plt.ion()
self.cbarflag=True
def draw_next(self, **kwargs):
for i in range(len(self.pos)):
print(i)
#plt.show()
yield self.draw_fig(self.time_chu.__next__(), self.f, self.data_chu.__next__(), **kwargs)
#plt.close()
def draw_fig(self, time, f, data,clevs,ylim=None ,cmap=None, **kwargs):
import matplotlib.colors as colors
self.ax.clear()
time_local=time#time_chu.next()
data_local=data#data_chu.next()
print('time', time_local.shape)
print('data', data_local.shape)
#Figure=M.plot_periodogram(time_local,f[:],data_local[:,:], **kwargs)
#fig=plt.gcf()
#M.clevels(data_local[:,:], )
#Figure.imshow(shading=True, downscale_fac=None, anomalie=False,ax=(self.ax,self.fig), cbar=self.cbarflag)
#Figure.set_xaxis_to_days(int1=1, int2=2)
#Figure.ax.set_yscale("linear", nonposy='clip')
self.clevs=clevs
cmap=plt.cm.PuBuGn if cmap is None else cmap
shading='gouraud'
norm = colors.BoundaryNorm(boundaries=self.clevs, ncolors=256)
#self.cs=plt.contourf(time_local,f,data_local.T,self.clevs, **kwargs)
self.cs=plt.pcolormesh(time_local,f,data_local.T,cmap=cmap , norm=norm,
shading=shading)
#self.ax.set_yscale("log", nonposy='clip')
if self.cbarflag is True:
self.cbar= plt.colorbar(self.cs,pad=0.01)#, Location='right')#
self.cbar.ax.aspect=100
self.cbar.outline.set_linewidth(0)
#self.cbar.set_label('Power db(' + self.data_unit + '^2/f ')
if ylim is not None:
self.ax.set_ylim(ylim[0], ylim[1])
#self.ax.set_xticklabels(time_local.astype('M8[D]')[trange][::6], minor=False)
#drawnow(draw_fig)
#draw_fig()# The drawnow(makeFig) command can be replaced
plt.draw()
self.cbarflag=False
#self.ax=Figure.ax
return self.ax
def data_chunks_split(data, pos, dim):
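    # Generator: split `data` at the index positions in `pos` along axis 0 and
    # yield the resulting chunks one at a time (list input is not supported).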
if type(data) is np.ndarray:
datalist=np.split(data,pos, 0)
for D in datalist:
yield D
elif type(data) is list:
raise ValueError("not porgrammed get")
print('list')
datalist=[]
for L in data:
print(L.shape)
#np.split(L,pos, 0)
datalist.append(np.split(L,pos, 0))
for k in range(len(datalist[:][1])):
print(k)
yield datalist[k][:]
def data_chunks(data, pos, dim):
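    # Generator: yield data[p0:p1] (dim=0) or data[:, p0:p1] (dim=1) for each
    # (start, stop) tuple in `pos` (list input is not supported).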
if type(data) is np.ndarray:
datalist=list()
if dim == 0:
for pp in pos:
datalist.append(data[pp[0]:pp[1]])
elif dim ==1:
for pp in pos:
datalist.append(data[:,pp[0]:pp[1]])
for D in datalist:
yield D
elif type(data) is list:
        raise ValueError("list input not programmed yet")
print('list')
datalist=[]
for L in data:
print(L.shape)
#np.split(L,pos, 0)
datalist.append(np.split(L,pos, 0))
for k in range(len(datalist[:][1])):
print(k)
yield datalist[k][:]
class PointCollectorv3:
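    # Interactive click collector: the first two clicks mark a slope line (P1, P2,
    # plotted in red), the third marks the D point (green); clicking outside the
    # axes advances to the next time chunk via the supplied Drawer generator.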
def __init__(self, ax, Drawer=None):
self.pointcount=0
line, = ax.plot([np.nan], [np.nan], marker="o", markersize=4, color="red")
lineD, = ax.plot([np.nan], [np.nan], marker="o", markersize=8, color="green")
self.line = line
self.lineD = lineD
self.xs = list(line.get_xdata())
self.ys = list(line.get_ydata())
self.D=[np.nan, np.nan]#line.get_xdata(), line.get_ydata())
self.slopes=list()
self.P1=[]
self.P2=[]
self.D=[]
#self.ax=ax
self.cid = line.figure.canvas.mpl_connect('button_press_event', self)
#self.cid =line.figure.canvas.mpl_connect('key_press_event', self)
self.Drawer=Drawer
def __call__(self, event):
print('click', event)
if (event.inaxes!=self.line.axes) & (self.Drawer is not None):
print('next chunk')
#self.fig.canvas.mpl_disconnect(self.cid)
            newax = next(self.Drawer)
#newax=self.line.axes
#print(newax)
line, = newax.plot([np.nan], [np.nan], marker="o", markersize=4, color="red")
lineD, = newax.plot([np.nan], [np.nan], marker="o", markersize=8, color="green")
self.line=line
self.lineD = lineD
self.cid =newax.figure.canvas.mpl_connect('button_press_event', self)
self.pointcount=4
#return
if self.pointcount == 0:
self.pointcount+=1
self.xs.append(event.xdata)
self.ys.append(event.ydata)
self.P1=(event.xdata, event.ydata)
self.line.set_data(self.xs, self.ys)
self.line.figure.canvas.draw()
elif self.pointcount == 1:
self.pointcount+=1
self.xs.append(event.xdata)
self.ys.append(event.ydata)
self.P2=(event.xdata, event.ydata)
self.line.set_data(self.xs, self.ys)
self.line.figure.canvas.draw()
elif self.pointcount >= 2:
print('its 3')
self.pointcount=0
self.D1=(event.xdata,event.ydata)
self.slopes.append([self.P1, self.P2, self.D1])
self.D.append(self.D1)
self.D.append((np.nan,np.nan) )
self.xs.append(np.nan)
self.ys.append(np.nan)
#P1=[]
#P2=[]
#D=[]
            self.lineD.set_data([event.xdata], [event.ydata])
self.lineD.figure.canvas.draw()
self.line.set_data(self.xs, self.ys)
self.line.figure.canvas.draw()
class PointCollectorv4:
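    # Variant of PointCollectorv3 with the same three-click protocol and
    # chunk-switching behaviour.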
def __init__(self, ax, Drawer=None):
self.pointcount=0
line, = ax.plot([np.nan], [np.nan], marker="o", markersize=4, color="red")
lineD, = ax.plot([np.nan], [np.nan], marker="o", markersize=8, color="green")
self.line = line
self.lineD = lineD
self.xs = list(line.get_xdata())
self.ys = list(line.get_ydata())
self.D=[np.nan, np.nan]#line.get_xdata(), line.get_ydata())
self.slopes=list()
self.P1=[]
self.P2=[]
self.D=[]
#self.ax=ax
self.cid = line.figure.canvas.mpl_connect('button_press_event', self)
#self.cid =line.figure.canvas.mpl_connect('key_press_event', self)
self.Drawer=Drawer
def __call__(self, event):
print('click', event)
if (event.inaxes!=self.line.axes) & (self.Drawer is not None):
print('next chunk')
#self.fig.canvas.mpl_disconnect(self.cid)
            newax = next(self.Drawer)
line, = newax.plot([np.nan], [np.nan], marker="o", markersize=4, color="red")
lineD, = newax.plot([np.nan], [np.nan], marker="o", markersize=8, color="green")
self.line=line
self.lineD = lineD
self.cid =newax.figure.canvas.mpl_connect('button_press_event', self)
self.pointcount=4
#newax.figure.canvas.draw()
#return
if self.pointcount == 0:
self.pointcount+=1
self.xs.append(event.xdata)
self.ys.append(event.ydata)
self.P1=(event.xdata, event.ydata)
self.line.set_data(self.xs, self.ys)
self.line.figure.canvas.draw()
elif self.pointcount == 1:
self.pointcount+=1
self.xs.append(event.xdata)
self.ys.append(event.ydata)
self.P2=(event.xdata, event.ydata)
self.line.set_data(self.xs, self.ys)
self.line.figure.canvas.draw()
elif self.pointcount >= 2:
print('its 3')
self.pointcount=0
self.D1=(event.xdata,event.ydata)
self.slopes.append([self.P1, self.P2, self.D1])
self.D.append(self.D1)
self.D.append((np.nan,np.nan) )
self.xs.append(np.nan)
self.ys.append(np.nan)
#P1=[]
#P2=[]
#D=[]
            self.lineD.set_data([event.xdata], [event.ydata])
self.lineD.figure.canvas.draw()
self.line.set_data(self.xs, self.ys)
self.line.figure.canvas.draw()
def create_listofstorms(slopes, hist=None):
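    # Turn the raw click triplets (P1, P2, D) collected by a PointCollector into a
    # dict of storm points, ordering P1/P2 by frequency and skipping entries that
    # contain None or NaN.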
storm=dict()
list_of_storms=dict()
list_of_storms['P1']=[]
list_of_storms['P2']=[]
list_of_storms['D']=[]
hist='list of storms' if hist is None else MT.write_log(hist, 'list of storms')
for s in slopes:
#print(s, len(np.array(s)))
if (sum([None in ss for ss in s]) != 0) or (np.isnan(np.array(s)).any()):
            print('None or NaN values, line skipped:')
            print(s)
            hist = MT.write_log(hist, 'None or NaN values, line skipped: ' + str(s))
            warnings.warn("Some points are NaN or None")
else:
            if s[0][1] > s[1][1]:  # descending slope
                P1, P2 = s[1], s[0]
            else:
                P2, P1 = s[1], s[0]  # P1: first arrival, P2: last arrival
            D = s[2]  # list of the green D points
list_of_storms['P1'].append(P1)
list_of_storms['P2'].append(P2)
list_of_storms['D'].append(D)
list_of_storms['hist']=hist
return list_of_storms
#def ID_builder(Station, Pol, )
def convert_slope_intersect_to_MS1957(slope, intersect, realtime, verbose=False, as_Dataframe=True):
"""
    This function converts the non-dimensional slope and intersect to
    a radial distance in meters and an initial time as datetime64.
"""
Tmin1= realtime[-1]
T0 = realtime[0]
Dt = (Tmin1-T0)
if intersect.size > 1:
import pandas as pd
t0 = pd.to_datetime(realtime[0]) + pd.to_timedelta(intersect *Dt)
else:
t0 = Dt * intersect + T0
if verbose:
print(T0)
print(Dt)
    # intersect_adjusted = Storm.cal_intersect_adjust(params)  # add adjusted intercept here: the estimated line then goes through the maximum of the model
    # t0_peak = Dt * intersect_adjusted + T0
Dt_sec=Dt.astype('m8[s]').astype(float)
dfdt= slope / Dt_sec
g=9.8196
r0= g /(4*np.pi*dfdt )
if as_Dataframe:
import pandas as pd
return pd.DataFrame(data={'r0':r0 , 't0':t0 })
else:
return r0, t0
def convert_geo_time_to_dt64(geo):
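    # Convert every time-like entry of a storm-geometry dict from seconds to numpy.datetime64.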
import copy
S=copy.deepcopy(geo)
    for k, I in S.items():
if type(I) is list:
S[k][0]=MT.sec_to_dt64(np.array(I[0]))
elif isinstance(I, np.ndarray):
S[k]=MT.sec_to_dt64(I)
S['t0']=MT.sec_to_dt64(np.array(S['t0']))
S['t0R']=MT.sec_to_dt64(np.array(S['t0R']))
S['t0L']= MT.sec_to_dt64(np.array(S['t0L']))
return S
def convert_geo_time_to_float_plot(geo):
import copy
S=copy.deepcopy(geo)
converter=MT.sec_to_float_plot_single
    for k, I in S.items():
if type(I) is list:
S[k][0]=converter(I[0])
elif isinstance(I, np.ndarray):
S[k]=MT.sec_to_float_plot(I)
S['t0']=converter(np.array(S['t0']))
S['t0R']=converter(np.array(S['t0R']))
S['t0L']= converter(np.array(S['t0L']))
#elif type(I) is float:
# S[k]=MT.sec_to_dt64(np.array(I))
return S
class Storm(object):
def __init__(self, ID):
self.ID=ID
self.hist='------ | '+ self.ID
self.write_log('initialized')
self.fit_dict=False
self.SM_dict_pandas=None
#if S is None:
#date,
def create_storm_geo(self, P1, P2, D, f, **karg):
self.f=f
self.geo=self.geometry(P1, P2, D, **karg)
self.write_log('created geometry')
def geometry(self, P1, P2, D, f_margins=0.001):
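        # From the two red points P1/P2 and the green point D (clicked in
        # time-frequency space), derive the dispersion-line slope mf, its time
        # intercepts (t0, t0L, t0R), the frequency band, and the bounding lines.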
#print(P1, P2, D)
f=self.f
mf=(P2[0]-P1[0])/(P2[1]-P1[1])
t0=P1[0]- mf * P1[1]
t0R=D[0]- mf * D[1]
delta_t=abs(t0R-t0)
if t0R > t0:
t0L=t0-delta_t
else:
t0L=t0R
t0R=t0+delta_t
f_low=P1[1]-f_margins
f_high=P2[1]+f_margins
bound_r=mf*f + t0R
bound_l=mf*f + t0L
cline=mf*f+t0
t0_75l=t0-delta_t*.5
line75left=mf*f+t0-delta_t*.5
return {'P1': P1, 'P2': P2, 'D': D,
'mf': mf, 't0': t0, 't0R':t0R,'t0L':t0L,'t0_75l':t0_75l,
'delta_t':delta_t, 'bound_r':bound_r, 'bound_l':bound_l,
'f_low':f_low, 'f_high':f_high,
'cline':cline, 'line75left':line75left, 'f_margins':f_margins}
def plot_stormgeometry(self, time_flag='sec'):
self.write_log('plotted geometry')
f=self.f
if time_flag == 'sec':
S=self.geo
elif time_flag == 'dt64':
S=convert_geo_time_to_dt64(self.geo)
elif time_flag == 'float_plot':
S=convert_geo_time_to_float_plot(self.geo)
else:
raise ValueError("unknown time_flag")
print(S['D'][0],S['D'][1])
plt.plot(S['D'][0],S['D'][1],'.',color='g', markersize=20)
plt.plot(S['P1'][0],S['P1'][1],'.', c='r', markersize=20)
plt.plot(S['P2'][0],S['P2'][1],'.', c='r', markersize=20)
plt.plot(S['t0'],0,'.', c='orange', markersize=20)
plt.plot(S['t0R'],0,'.', c='orange', markersize=20)
plt.plot(S['t0L'],0,'.', c='orange', markersize=20)
plt.plot(S['cline'],f, c='k')
plt.plot(S['bound_r'],f, c='grey')
plt.plot(S['bound_l'],f, c='green')
plt.plot(S['line75left'],f, c='red')
if time_flag == 'sec':
plt.plot(np.linspace(S['t0L'],S['bound_r'].max(), 10),np.ones(10)*S['f_low'], c='grey')
plt.plot(np.linspace(S['t0L'],S['bound_r'].max(), 10),np.ones(10)*S['f_high'], c='grey')
elif time_flag == 'dt64':
tx=np.arange(S['t0L'],S['bound_r'].max(), np.timedelta64(1, 'D'))
plt.plot(tx,np.ones(tx.size)*S['f_low'], c='grey')
plt.plot(tx,np.ones(tx.size)*S['f_high'], c='grey')
elif time_flag == 'float_plot':
tx=np.arange(S['t0L'],S['bound_r'].max(),1)
plt.plot(tx,np.ones(tx.size)*S['f_low'], c='grey')
plt.plot(tx,np.ones(tx.size)*S['f_high'], c='grey')
def plot_cutted_data(self, time_flag='float_plot', **karg ):
self.write_log('plotted cutted data')
from decimal import Decimal
mmin=np.nanmin(self.masked_data)
mmax=np.nanmax(self.masked_data)
self.clevs=np.linspace(mmin, mmax, 31)
#self.clevs=np.arange(0,1+.1,.1)*1e-5
self.cbarstr=['%.1e' % Decimal(p) for p in self.clevs]
Figure=M.plot_spectrogram(self.time_dict[time_flag],self.f,self.masked_data,
#clevs=clevs,
sample_unit='1/'+self.dt_unit,
ylim=[self.geo['f_low'], self.geo['f_high']], cmap=plt.cm.PuBuGn, clevs=self.clevs, **karg)#(0, .1))
Figure.imshow(shading=True, downscale_fac=None,anomalie=False, fig_size=[5, 2])
Figure.set_xaxis_to_days()
Figure.ax.set_yscale("linear", nonposy='clip')
Figure.ax.set_title(self.ID)
Figure.ax.set_ylim(-.001,max([.1, self.f.max()]))
Figure.cbar.set_ticks(self.clevs)
Figure.cbar.set_ticklabels(self.cbarstr)
Figure.F.make_clear_weak()
return Figure
def create_mask(self, time):
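        # Build a boolean time-frequency mask that is True between the left and
        # right bounding lines (bound_l, bound_r) and within [f_low, f_high].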
self.write_log('masked created')
f, S = self.f, self.geo
ll=np.vstack((S['bound_l'], S['bound_r']))
maskarray=np.logical_and(np.zeros(time.size), True)
#print(time)
dt=int(np.diff(time).mean())
for fi in range(f.size):
mask=M.cut_nparray(time,ll[:,fi][0]-dt, ll[:,fi][1]+dt)
maskarray=np.vstack((maskarray, mask))
maskarray=np.delete(maskarray, 0,0)
fmask=M.cut_nparray(f, S['f_low'], S['f_high'])
_, fmaskmesh=np.meshgrid(time, fmask)
self.mask_full=(fmaskmesh & maskarray).T
def cut_full_data(self, data):
self.data=np.copy(data)
self.data[self.mask_full == False]=np.nan
#return mdata
def cut_data(self, time_in, f_data, data, dt_unit, clevs):
import numpy.ma as ma
self.dt_unit=dt_unit
self.clevs=clevs
if type(time_in) is dict:
time=np.copy(time_in['sec'])
self.dt_sec=np.diff(time).mean()
else:
time=np.copy(time_in)
self.dt_sec=np.diff(time).mean()
self.create_mask(time)
fmask=M.cut_nparray(f_data, self.geo['f_low'], self.geo['f_high'])#np.logical_and(np.zeros(f_data.size)+1, True)
#adjsut geometry
#print(len(fmask))
self.f=self.f[fmask]
self.geo['cline']=self.geo['cline'][fmask]
self.geo['bound_r']=self.geo['bound_r'][fmask]
self.geo['bound_l']=self.geo['bound_l'][fmask]
self.geo['line75left']=self.geo['line75left'][fmask]
# test data shape with time shape
timemask=M.cut_nparray(time, self.geo['t0L'],self.geo['bound_r'].max())
#self.xlim=(self.geo['t0L'],self.geo['bound_r'].max())
#print(timemask)
#return time[timemask], S['masked_data'][:, timemask]
if type(time_in) is dict:
self.time=time[timemask]
self.time_dict=dict()
            for k, I in time_in.items():
self.time_dict[k]=I[timemask]
else:
self.time=time[timemask]
#print(fmask.shape)
#print(data.shape)
self.data=np.copy(data[timemask,:][:,fmask])
#print(self.data.shape)
self.mask=self.mask_full[timemask,:][:,fmask]
#print('mask full', self.mask_full.shape)
#print(self.mask.shape)
self.masked_data=np.copy(self.data)
#print(self.masked_data.shape, self.mask.shape)
self.masked_data[self.mask ==False]=np.nan
self.data_masked_array= ma.array(self.data, mask=self.mask)
        self.write_log('cut & assigned data of original shape ' + str(data.shape))
self.write_log('data cutted')
def load(self, path, verbose=False):
#load data and attibutes
D= MT.pickle_load(self.ID,path, verbose)
for k, v in D.items():
setattr(self, k, v)
self.hist= MT.json_load(self.ID,path, verbose)[0]
#if os.path.isfile(path+self.ID+'.h5'):
# with pd.HDFStore(path+self.ID+'.h5') as store2:
# #store2 = pd.HDFStore(path+self.ID+'x.h5')
# for k,I in store2.iteritems():
# setattr(self, k, I)
#store2.close()
#return A, B
def save(self, save_path, verbose=False):
import warnings
from pandas import HDFStore
from pandas.io.pytables import PerformanceWarning
self.write_log('data saved')
#save as an npy file with cPickle flag False
#+ Jason for meta data and par numbers.
if not isinstance(self.SM_dict_pandas, type(None)):
#SM_dic=self.SM_dict_pandas
#SM_dic.to_hdf(save_path+self.ID+'.h5','w' )
warnings.filterwarnings('ignore',category=PerformanceWarning)
with
|
HDFStore(save_path+self.ID+'.h5')
|
pandas.HDFStore
|
import datareader
import dataextractor
import bandreader
import numpy as np
from _bisect import bisect
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import pandas as pd
from scipy import stats
from sklearn import metrics
def full_signal_extract(path, ident):
"""Extract breathing and heartbeat features from one user and save features to file.
:param path: (str) main path to data, where user data is located in specific folders
:param ident: (str) user identifier
:return: Nothing. It saves features (dataframe) to a .csv file
"""
dataread = datareader.DataReader(path, ident) # initialize path to data
data = dataread.read_grc_data() # read from files
data = dataread.unwrap_grc_data() # unwrap phase. returns time and y values
samp_rate = round(len(data[1]) / max(data[0]))
dataextract = dataextractor.DataExtractor(data[0], data[1], samp_rate)
cog_res = dataread.read_cognitive_load_study(ident + '-primary-extract.txt')
    end_epoch_time = dataread.get_end_time_cognitive_load_study()  # end time of the study (epoch)
extracted_br_features = dataextract.raw_windowing_breathing(30, 1)
extracted_br_features['br_rate'] = np.array(extracted_br_features['br_rate'].rolling(6).mean())
extracted_br_features_roll_avg = extracted_br_features.loc[:, extracted_br_features.columns != 'times'].rolling(
6).mean()
extracted_br_features_roll_avg['times'] = extracted_br_features['times']
extracted_br_features_roll_avg['br_ok'] = extracted_br_features['br_ok']
extracted_hr_features = dataextract.raw_windowing_heartrate(10, 1)
extracted_hr_features = extracted_hr_features.drop(['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf'], axis=1)
extracted_hr_features_roll_avg = extracted_hr_features.loc[:, extracted_hr_features.columns != 'times'].rolling(
10).mean()
extracted_hr_features_roll_avg['times'] = extracted_hr_features['times']
extracted_hr_features_roll_avg['hr_ok'] = extracted_hr_features['hr_ok']
extracted_hr_features2 = dataextract.raw_windowing_heartrate(100, 1) # longer time to extract HRV frequency feat.
extracted_hr_features2 = extracted_hr_features2[['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf', 'times']]
extracted_hr_features2_roll_avg = extracted_hr_features2.loc[:, extracted_hr_features2.columns != 'times'].rolling(
10).mean()
extracted_hr_features2_roll_avg['times'] = extracted_hr_features2['times']
all_features = extracted_br_features_roll_avg
all_features = pd.merge(all_features, extracted_hr_features_roll_avg, on='times')
all_features = pd.merge(all_features, extracted_hr_features2_roll_avg, on='times')
task_timestamps = dataread.get_data_task_timestamps()
relax_timestamps = dataread.get_relax_timestamps()
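    # Load the wrist-band heart-rate log and align it with the radar recording:
    # keep only band samples between (end_epoch_time - recording length in ms) and end_epoch_time.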
bandread = bandreader.HeartRateBand(path + '_Hrates/', ident)
band_data = bandread.load()
band_data_time_start = bisect(band_data[0][:], end_epoch_time - data[0][-1] * 1000)
band_data_time_stop = bisect(band_data[0][:], end_epoch_time)
band_data = [band_data[0][band_data_time_start:band_data_time_stop],
band_data[1][band_data_time_start:band_data_time_stop]]
band_data_new__data = [(band_data[0] - band_data[0][0]) / 1000, band_data[1]]
    hr_data = extracted_hr_features_roll_avg[['times', 'hr_rate']].copy()  # copy to avoid SettingWithCopyWarning when casting below
hr_data['times'] = hr_data['times'].astype(int)
band_data = pd.DataFrame()
band_data['times'] = band_data_new__data[0]
band_data['times'] = band_data['times'].astype(int)
band_data['band_rate'] = band_data_new__data[1]
band_data = band_data.drop_duplicates(subset=['times'])
together_data =
|
pd.merge(hr_data, band_data, on='times')
|
pandas.merge
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import numpy as np
import pandas as pd
from transformers import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TapasConfig,
TapasTokenizer,
is_tf_available,
)
from transformers.file_utils import cached_property
from transformers.models.auto import get_values
from transformers.testing_utils import require_tensorflow_probability, require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import (
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
)
from transformers.models.tapas.modeling_tf_tapas import (
IndexMap,
ProductIndexMap,
flatten,
gather,
range_index_map,
reduce_max,
reduce_mean,
reduce_sum,
)
class TFTapasModelTester:
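    # Builds a small random TapasConfig plus matching input tensors and labels so
    # the TF TAPAS model classes can be exercised quickly in the tests below.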
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
max_position_embeddings=512,
type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
type_sequence_label_size=2,
positive_weight=10.0,
num_aggregation_labels=4,
num_labels=2,
aggregation_loss_importance=0.8,
use_answer_as_supervision=True,
answer_loss_importance=0.001,
use_normalized_answer_loss=False,
huber_loss_delta=25.0,
temperature=1.0,
agg_temperature=1.0,
use_gumbel_for_cells=False,
use_gumbel_for_agg=False,
average_approximation_function="ratio",
cell_selection_preference=0.5,
answer_loss_cutoff=100,
max_num_rows=64,
max_num_columns=32,
average_logits_per_cell=True,
select_one_column=True,
allow_empty_column_selection=False,
init_cell_selection_weights_to_zero=True,
reset_position_index_per_cell=True,
disable_per_token_loss=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.max_position_embeddings = max_position_embeddings
self.type_vocab_sizes = type_vocab_sizes
self.type_sequence_label_size = type_sequence_label_size
self.positive_weight = positive_weight
self.num_aggregation_labels = num_aggregation_labels
self.num_labels = num_labels
self.aggregation_loss_importance = aggregation_loss_importance
self.use_answer_as_supervision = use_answer_as_supervision
self.answer_loss_importance = answer_loss_importance
self.use_normalized_answer_loss = use_normalized_answer_loss
self.huber_loss_delta = huber_loss_delta
self.temperature = temperature
self.agg_temperature = agg_temperature
self.use_gumbel_for_cells = use_gumbel_for_cells
self.use_gumbel_for_agg = use_gumbel_for_agg
self.average_approximation_function = average_approximation_function
self.cell_selection_preference = cell_selection_preference
self.answer_loss_cutoff = answer_loss_cutoff
self.max_num_rows = max_num_rows
self.max_num_columns = max_num_columns
self.average_logits_per_cell = average_logits_per_cell
self.select_one_column = select_one_column
self.allow_empty_column_selection = allow_empty_column_selection
self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
self.reset_position_index_per_cell = reset_position_index_per_cell
self.disable_per_token_loss = disable_per_token_loss
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = []
for type_vocab_size in self.type_vocab_sizes:
token_type_ids.append(ids_tensor(shape=[self.batch_size, self.seq_length], vocab_size=type_vocab_size))
token_type_ids = tf.stack(token_type_ids, axis=2)
sequence_labels = None
token_labels = None
labels = None
numeric_values = None
numeric_values_scale = None
float_answer = None
aggregation_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
labels = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
numeric_values = ids_tensor([self.batch_size, self.seq_length], vocab_size=2, dtype=tf.float32)
numeric_values_scale = ids_tensor([self.batch_size, self.seq_length], vocab_size=2, dtype=tf.float32)
float_answer = ids_tensor([self.batch_size], vocab_size=2, dtype=tf.float32)
aggregation_labels = ids_tensor([self.batch_size], self.num_aggregation_labels)
config = self.get_config()
return (
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
)
def get_config(self):
return TapasConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_sizes=self.type_vocab_sizes,
initializer_range=self.initializer_range,
positive_weight=self.positive_weight,
num_aggregation_labels=self.num_aggregation_labels,
num_labels=self.num_labels,
aggregation_loss_importance=self.aggregation_loss_importance,
use_answer_as_supervision=self.use_answer_as_supervision,
answer_loss_importance=self.answer_loss_importance,
use_normalized_answer_loss=self.use_normalized_answer_loss,
huber_loss_delta=self.huber_loss_delta,
temperature=self.temperature,
agg_temperature=self.agg_temperature,
use_gumbel_for_cells=self.use_gumbel_for_cells,
use_gumbel_for_agg=self.use_gumbel_for_agg,
average_approximation_function=self.average_approximation_function,
cell_selection_preference=self.cell_selection_preference,
answer_loss_cutoff=self.answer_loss_cutoff,
max_num_rows=self.max_num_rows,
max_num_columns=self.max_num_columns,
average_logits_per_cell=self.average_logits_per_cell,
select_one_column=self.select_one_column,
allow_empty_column_selection=self.allow_empty_column_selection,
init_cell_selection_weights_to_zero=self.init_cell_selection_weights_to_zero,
reset_position_index_per_cell=self.reset_position_index_per_cell,
disable_per_token_loss=self.disable_per_token_loss,
)
def create_and_check_model(
self,
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
):
model = TFTapasModel(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
inputs.pop("attention_mask")
result = model(inputs)
inputs.pop("token_type_ids")
result = model(inputs)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_masked_lm(
self,
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
):
model = TFTapasForMaskedLM(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
"labels": token_labels,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(
self,
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
):
config.num_labels = self.num_labels
model = TFTapasForSequenceClassification(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"labels": sequence_labels,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_question_answering(
self,
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
):
# inference: without aggregation head (SQA). Model only returns logits
sqa_config = copy.copy(config)
sqa_config.num_aggregation_labels = 0
sqa_config.use_answer_as_supervision = False
model = TFTapasForQuestionAnswering(config=sqa_config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
# inference: with aggregation head (WTQ, WikiSQL-supervised). Model returns logits and aggregation logits
model = TFTapasForQuestionAnswering(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels))
# training: can happen in 3 main ways
# case 1: conversational (SQA)
model = TFTapasForQuestionAnswering(config=sqa_config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
"labels": labels,
}
result = model(inputs)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
# case 2: weak supervision for aggregation (WTQ)
model = TFTapasForQuestionAnswering(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
"labels": labels,
"numeric_values": numeric_values,
"numeric_values_scale": numeric_values_scale,
"float_answer": float_answer,
}
result = model(inputs)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels))
# case 3: strong supervision for aggregation (WikiSQL-supervised)
wikisql_config = copy.copy(config)
wikisql_config.use_answer_as_supervision = False
model = TFTapasForQuestionAnswering(config=wikisql_config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
"labels": labels,
"aggregation_labels": aggregation_labels,
}
result = model(inputs)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
token_type_ids,
sequence_labels,
token_labels,
labels,
numeric_values,
numeric_values_scale,
float_answer,
aggregation_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tensorflow_probability
@require_tf
class TFTapasModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
TFTapasModel,
TFTapasForMaskedLM,
TFTapasForSequenceClassification,
TFTapasForQuestionAnswering,
)
if is_tf_available()
else ()
)
test_head_masking = False
test_onnx = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
inputs_dict = copy.deepcopy(inputs_dict)
if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
inputs_dict = {
k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(v, tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in get_values(TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING):
inputs_dict["labels"] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
)
inputs_dict["aggregation_labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
inputs_dict["numeric_values"] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.float32
)
inputs_dict["numeric_values_scale"] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.float32
)
inputs_dict["float_answer"] = tf.zeros(self.model_tester.batch_size, dtype=tf.float32)
elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING):
inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in [
*get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
*get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING),
*get_values(TF_MODEL_FOR_MASKED_LM_MAPPING),
*get_values(TF_MODEL_FOR_PRETRAINING_MAPPING),
*get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING),
]:
inputs_dict["labels"] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
)
return inputs_dict
def setUp(self):
self.model_tester = TFTapasModelTester(self)
self.config_tester = ConfigTester(self, config_class=TapasConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def prepare_tapas_single_inputs_for_inference():
# Here we prepare a single table-question pair to test TAPAS inference on:
data = {
"Footballer": ["<NAME>", "<NAME>"],
"Age": ["33", "35"],
}
queries = "Which footballer is 33 years old?"
table = pd.DataFrame.from_dict(data)
return table, queries
def prepare_tapas_batch_inputs_for_inference():
# Here we prepare a batch of 2 table-question pairs to test TAPAS inference on:
data = {
"Footballer": ["<NAME>", "<NAME>"],
"Age": ["33", "35"],
"Number of goals": ["712", "750"],
}
queries = ["Which footballer is 33 years old?", "How many goals does Ronaldo have?"]
table = pd.DataFrame.from_dict(data)
return table, queries
def prepare_tapas_batch_inputs_for_training():
# Here we prepare a DIFFERENT batch of 2 table-question pairs to test TAPAS training on:
data = {
"Footballer": ["<NAME>", "<NAME>"],
"Age": ["33", "35"],
"Number of goals": ["712", "750"],
}
queries = ["Which footballer is 33 years old?", "What's the total number of goals?"]
table =
|
pd.DataFrame.from_dict(data)
|
pandas.DataFrame.from_dict
|
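For reference, a minimal self-contained sketch of the pandas.DataFrame.from_dict call shown in the completion above; the player names here are placeholders chosen for illustration, not the elided originals.

import pandas as pd

# Build a small table from a dict of equal-length column lists, as in the TAPAS input-prep helpers.
data = {
    "Footballer": ["Player A", "Player B"],  # placeholder names, for illustration only
    "Age": ["33", "35"],
}
table = pd.DataFrame.from_dict(data)  # dict keys become column names, list entries become rows
print(table)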
import numpy as np
import random
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
import pandas as pd
import warnings
from multiprocessing import Pool
from numba import njit, prange
def euclidean_distance_per_feature(a, b):
"""Compute the euclidean distance per shared feature between two numpy arrays.
Parameters
----------
a: numpy array
b: numpy array
Returns
-------
numpy array
"""
diff=a-b
n_feature = len(diff)-np.isnan(diff).sum()
if n_feature == 0:
print("warning was about to divide by zero")
return 10000*len(diff)
return np.sqrt(np.nansum(diff*diff))/n_feature
@njit(parallel=True)
def dist_edpf(XA,XB):
'''
dist(u=XA[i], v=XB[j]) is computed and stored in the ij'th entry.
where dist is the above euclidean_distance_per_feature
Parameters
----------
XA : numpy array
XB : numpy array
Returns
-------
arr : numpy array
'''
n_a = len(XA)
n_b = len(XB)
arr = np.empty((n_a,n_b))
for i in prange(n_a):
for j in prange(n_b):
diff=XA[i]-XB[j]
arr[i][j]=np.sqrt(np.nansum(diff*diff))/(len(diff)-np.isnan(diff).sum())
return arr
class KMeans(object):
'''
K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
init :
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
n_init : int, default: 1
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
tolerance : float, default: 1e-4
Attributes
----------
centroids_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
'''
def __init__(self, n_clusters=8, init='k-means++', n_init=1,
max_iter=300, tolerance = 1e-4, verbose = False):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tolerance = tolerance
self.n_init = n_init
self.verbose = verbose
self.centroids_ = None
self.labels_ = None
def _initialize_centroids(self, X):
'''
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
k-means++ initialization for centroids
'''
# use Kmeans plus plus
self.centroids_ = self._kmeans_plus_plus(X)
def _kmeans_plus_plus(self, X):
'''
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
helper function to initialize centroids in a smart way
'''
k=self.n_clusters
centroids = np.empty((k, X.shape[1]))
for j in range(k):
if j == 0:
centroids[j] = X[np.random.choice(X.shape[0])]
else:
# compute euclidean distance per feature from each point to its nearest already-chosen centroid
dists = dist_edpf(X, centroids[:j].reshape(-1, X.shape[1]))
dists2 = dists.min(axis = 1)
# pick the next centroid with probability proportional to that distance
ind = np.random.choice(X.shape[0], p = dists2/dists2.sum())
centroids[j] = X[ind]
return centroids
def _assign_clusters(self, X):
'''
computes euclidean distance per feature from each point to each centroid
and assigns each point to its closest centroid; sets self.labels_
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Data points to assign to clusters based on distance metric
'''
labels = self.predict(X)
self.labels_ = labels
def _compute_centroids(self, X):
'''
compute the centroids for the datapoints in X from the current values
of self.labels_
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Data points to assign to clusters based on distance metric
returns new centroids
'''
centroids=[]
for j in range(self.n_clusters):
arr = X[self.labels_==j]
if len(arr)-np.isnan(arr).sum()==0:
arr = X
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
centroids.append(np.nanmean(arr, axis=0))
return np.array(centroids)
def fit(self, X):
'''
Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.
'''
self._initialize_centroids(X)
for i in range(self.max_iter):
self._assign_clusters(X)
new_centroids = self._compute_centroids(X)
if (np.array([euclidean_distance_per_feature(*a) for a in zip(self.centroids_,new_centroids)]) < self.tolerance).all():
if self.verbose:
print('Converged on iteration {}'.format(i))
return i
# re-assign centroids
self.centroids_ = new_centroids
return i
def predict(self, X):
'''
Optional method: predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
'''
distances = dist_edpf(X, self.centroids_)
return distances.argmin(axis = 1)
def score(self, X):
'''
return the total residual sum of squares
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
The SSE
'''
labels = self.predict(X)
lst = []
for i in range(len(labels)):
lst.append(euclidean_distance_per_feature(X[i],self.centroids_[labels[i]]))
arr = np.array(lst)
SSE = np.sum(arr)
return SSE
def load_data(filename,n):
'''
builds a dataframe from the csv file named filename, whose index columns start at column n
Parameters
----------
filename : string, name of csv to read in
n : int, column position where the index columns start
Returns
-------
data : numpy array of the nonindex columns
df[index_cols] : pandas dataframe, just the pseudo-index columns
'''
df = pd.read_csv(filename+'.csv')
columns = list(df.columns)
index_cols = [columns[n],columns[n+1]]
print(columns[1])
value_cols = columns[n+2:]
data = df[value_cols].values
return data, df[index_cols]
def elbow_plot(data, plotname, n):
'''
builds an elbow plot and saves it as plotname
Parameters
----------
data : numpy array of the nonindex columns
plotname : string, what to save the fig as
n : int, how many clusters to consider
'''
plt.clf()
ks = np.arange(2, n+1)
sses = []
for k in ks:
model = KMeans(n_clusters = k, init='k-means++', max_iter=300, verbose=False, tolerance=0.00000001, n_init=3)
model.fit(data)
sc=model.score(data)
sses.append(sc)
print(k)
plt.plot(ks, sses)
plt.xlabel('Number of clusters')
plt.ylabel('SSE')
plt.title('Elbow Plot')
plt.savefig(plotname)
plt.show()
def silhouette(data, k, dist):
'''
computes the silhouette score for k clusters using a precomputed distance matrix
Parameters
----------
data : numpy array of the nonindex columns
k : int, number of clusters to build the silhouette score for
dist : numpy array of the precomputed distance between rows of the data
Returns
-------
silhouette_score : float, from sklearns function
'''
model = KMeans(n_clusters = k, init='k-means++',tolerance=0.00000001, n_init=10, verbose=False)
model.fit(data)
labels = model.labels_
return silhouette_score(dist, labels, metric="precomputed")
def show_countries_in_clusters(data,k,df_ind):
'''
prints to screen which country-year pseudo-indexes end up in each group,
for k clusters
Parameters
----------
data : numpy array of the nonindex columns
k : int, number of clusters
df_ind : pandas dataframe, country-year pseudo-indexes
'''
model = KMeans(n_clusters = k, init='k-means++',tolerance=.000000000000001, n_init=3, verbose=True)
model.fit(data)
labels = model.labels_
for i in range(k):
print("########################## label {} ##########################".format(i))
print(df_ind[labels==i][['Country Name','level_1']],'\n')
def write_multi_index_clusters(data,k,df_ind):
'''
writes to disk and returns the dataframe of labels
Parameters
----------
data : numpy array of the nonindex columns
k : int, number of clusters
df_ind : pandas dataframe, country-year pseudo-indexes
Returns
-------
df : pandas dataframe, the results of the clustering written as the column named label
'''
model = KMeans(n_clusters = k, init='k-means++',tolerance=.000000000000001, n_init=10, verbose=True)
model.fit(data)
labels = model.labels_
df_lb=pd.DataFrame(labels, columns=['label'])
df=
|
pd.concat([df_ind,df_lb],ignore_index=True, axis=1)
|
pandas.concat
|
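A minimal sketch of the pandas.concat call used in the completion above, with a toy pseudo-index frame; note that passing ignore_index=True together with axis=1, as the completion does, replaces the column names with a 0..n-1 RangeIndex.

import pandas as pd

df_ind = pd.DataFrame({"Country Name": ["A", "B"], "level_1": [2000, 2001]})  # toy pseudo-index columns
df_lb = pd.DataFrame([0, 1], columns=["label"])                               # cluster labels, one per row
df = pd.concat([df_ind, df_lb], axis=1)                                       # column-wise concatenation, rows aligned by index
print(df)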
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
# series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
# ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
# ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
|
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
|
pandas.util.testing.assert_series_equal
|
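A minimal sketch of what assert_series_equal checks: it raises AssertionError when two Series differ in values, dtype, index, or name, and returns None when they match. The import below uses pandas.testing, the path available in current pandas; the tests above use the older pandas.util.testing location.

import pandas as pd
from pandas.testing import assert_series_equal  # newer home of the helper used throughout the tests above

left = pd.Series([1.0, 2.0, 3.0], name="x")
right = pd.Series([1.0, 2.0, 3.0], name="x")
assert_series_equal(left, right)                 # passes silently: same values, dtype, index, and name
# assert_series_equal(left, right.astype(int))   # would raise AssertionError because the dtypes differ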
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from nltk import word_tokenize
from scipy.sparse import csr_matrix
from operator import itemgetter
import pandas as pd
def tokenize_unigram (tweets):
"""
Generate uni-gram tokenization
# Input : list of cleansed tweet texts
# Output : list of tokenized tweets
"""
tweets_tok = []
for tw in tweets:
tweets_tok.append(word_tokenize(tw))
return tweets_tok
def tokenize_bigram (tweets):
"""
Generate bi-gram tokenization
# Input : list of cleansed tweet texts
# Output : list of bi-gram tokenized tweets
"""
tweets_tok = []
for tw in tweets:
unigram = word_tokenize(tw)
tweet = []
for i in range(len(unigram)-1) :
tweet.append( (unigram[i],unigram[i+1]) )
tweets_tok.append(tweet)
return tweets_tok
def tweet_term (tweets_tok) :
"""
Generate tweet x term matrix
# Input : list of tokenized tweets (uni or bi-gram)
# Output : csr sparse matrix
"""
indptr = [0]
indices = []
data = []
vocabulary = {}
for value in tweets_tok:
for term in value :
index = vocabulary.setdefault(term, len(vocabulary))
indices.append(index)
data.append(1)
indptr.append(len(indices))
return csr_matrix((data, indices, indptr), dtype=int)
def tweet_term_df (tweets_tok):
"""
Generate tweet x term matrix
# Input : list of tokenized tweets (uni or bi-gram)
# Output : dataframe
"""
indptr = [0]
indices = []
data = []
vocabulary = {}
for value in tweets_tok:
for term in value :
index = vocabulary.setdefault(term, len(vocabulary))
indices.append(index)
data.append(1)
indptr.append(len(indices))
csr = csr_matrix((data, indices, indptr), dtype=int)
csr_array = csr.toarray()
sorted_voca = sorted(vocabulary.items(), key=itemgetter(1))
new_voca=[]
for i in sorted_voca:
new_voca.append(i[0])
return
|
pd.DataFrame(csr_array, columns=new_voca)
|
pandas.DataFrame
|
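A minimal runnable sketch of the pandas.DataFrame call in the completion above: densify the tweet-by-term sparse matrix and label its columns with the vocabulary in column order. The tiny matrix and vocabulary here are toy stand-ins.

import pandas as pd
from scipy.sparse import csr_matrix

csr = csr_matrix([[1, 0, 2], [0, 1, 1]])          # toy tweet x term counts (2 tweets, 3 terms)
vocab = ["hello", "world", "python"]              # column labels, in the same order as the matrix columns
df = pd.DataFrame(csr.toarray(), columns=vocab)   # densify, then attach term labels as column names
print(df)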
import pandas as pd
import os
from training.config import Config
class GeneExp:
"""
Class to get RNA-Seq data from given cell type.
Alter it and provide for classification.
"""
def __init__(self, cfg, chr):
self.cfg = cfg
self.gene_info = None
self.pc_data = None
self.nc_data = None
self.rb_data = None
self.chr = str(chr)
self.gene_exp_path = os.path.join(cfg.downstream_dir, "RNA-seq")
self.gene_exp_file = os.path.join(self.gene_exp_path, "Ensembl_v65.Gencode_v10.ENSG.gene_info")
self.pc_file = os.path.join(self.gene_exp_path, "57epigenomes.RPKM.pc.gz")
self.nc_file = os.path.join(self.gene_exp_path, "57epigenomes.RPKM.nc.gz")
self.rb_file = os.path.join(self.gene_exp_path, "57epigenomes.RPKM.rb.gz")
if cfg.cell == "GM12878":
self.cell_column = "E116"
elif cfg.cell == "H1hESC":
self.cell_column = "E003"
elif cfg.cell == "HFFhTERT":
self.cell_column = "E055"
def get_rna_seq(self):
"""
get_rna_seq() -> No return object
Gets RNA-Seq data for PC, NC, and RB modes.
Args:
NA
"""
self.gene_info = pd.read_csv(self.gene_exp_file, sep="\s+", header=None)
self.gene_info.rename(
columns={0: 'gene_id', 1: 'chr', 2: 'start', 3: 'end', 4: 'no_idea', 5: 'type', 6: 'gene',
7: 'info'}, inplace=True)
"get protein coding, non protein coding, and reference free modes"
self.pc_data =
|
pd.read_csv(self.pc_file, compression='gzip', header=0, sep="\s+")
|
pandas.read_csv
|
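A minimal runnable sketch of the pandas.read_csv call in the completion above; it builds a tiny gzipped, whitespace-delimited table in memory instead of touching the real 57epigenomes RPKM files, so the arguments (compression, header, sep) can be seen in isolation.

import gzip
import io
import pandas as pd

raw = "gene_id E003 E116\nENSG000001 1.5 2.0\nENSG000002 0.0 3.2\n"   # toy stand-in for the RPKM table
buf = io.BytesIO(gzip.compress(raw.encode()))                          # gzipped file-like object in memory

# Mirrors the call above: explicit gzip compression, first row as header, whitespace-run separator.
pc_data = pd.read_csv(buf, compression="gzip", header=0, sep=r"\s+")
print(pc_data)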