prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90)
---|---|---
"""hgboost: Hyperoptimized Gradient Boosting library.
Contributors: https://github.com/erdogant/hgboost
"""
import warnings
warnings.filterwarnings("ignore")
import classeval as cle
from df2onehot import df2onehot
import treeplot as tree
import colourmap
import pypickle
import os
import numpy as np
import pandas as pd
import wget
from sklearn.metrics import mean_squared_error, cohen_kappa_score, mean_absolute_error, log_loss, roc_auc_score, f1_score, r2_score
from sklearn.ensemble import VotingClassifier, VotingRegressor
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score
import xgboost as xgb
import catboost as ctb
try:
import lightgbm as lgb
except ImportError:
pass
from hyperopt import fmin, tpe, STATUS_OK, Trials, hp
from tqdm import tqdm
import time
import copy
# %%
class hgboost:
"""Create a class hgboost that is instantiated with the desired method."""
def __init__(self, max_eval=250, threshold=0.5, cv=5, test_size=0.2, val_size=0.2, top_cv_evals=10, is_unbalance=True, random_state=None, n_jobs=-1, verbose=3):
"""Initialize hgboost with user-defined parameters.
Parameters
----------
max_eval : int, (default : 250)
Number of evaluations over the search space.
threshold : float, (default : 0.5)
Classification threshold. In case of a two-class model this is 0.5.
cv : int, optional (default : 5)
Number of cross-validation folds. The test set size is specified by test_size.
top_cv_evals : int, (default : 10)
Number of top best performing models that are evaluated.
If set to None, each iteration (max_eval) is tested.
If set to 0, cross validation is not performed.
test_size : float, (default : 0.2)
Splitting train/test set with test_size=0.2 and train=1-test_size.
val_size : float, (default : 0.2)
Setup the validation set. This part is kept entirely separate from the test-size.
is_unbalance : Bool, (default: True)
Control the balance of positive and negative weights, useful for unbalanced classes.
xgboost clf : sum(negative instances) / sum(positive instances)
catboost clf : sum(negative instances) / sum(positive instances)
lightgbm clf : balanced
False: grid search
random_state : int, (default : None)
Fix the random state for the validation set and test set. Note that it is not used for the cross-validation.
n_jobs : int, (default : -1)
The number of jobs to run in parallel for fit. None means 1 unless in a joblib.parallel_backend context.
-1 means using all processors.
verbose : int, (default : 3)
Print progress to screen.
0: None, 1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE
Returns
-------
None.
References
----------
* https://github.com/hyperopt/hyperopt
* https://www.districtdatalabs.com/parameter-tuning-with-hyperopt
* https://scikit-learn.org/stable/modules/model_evaluation.html
"""
if (threshold is None) or (threshold <= 0): raise ValueError('[hgboost] >Error: [threshold] must be >0 and not [None]')
if (max_eval is None) or (max_eval <= 0): max_eval=1
if top_cv_evals is None: max_eval=0
if (test_size is None) or (test_size <= 0): raise ValueError('[hgboost] >Error: test_size must be >0 and not [None]. Note: the final model is learned on the entire dataset. [test_size] may help you get a more robust model.')
if (val_size is not None) and (val_size<=0): val_size=None
self.max_eval=max_eval
self.top_cv_evals=top_cv_evals
self.threshold=threshold
self.test_size=test_size
self.val_size=val_size
self.algo=tpe.suggest
self.cv=cv
self.random_state=random_state
self.n_jobs=n_jobs
self.verbose=verbose
self.is_unbalance = is_unbalance
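# A minimal usage sketch (hypothetical example; X and y are placeholder data, the method names are
# those defined on this class below):
#   hgb = hgboost(max_eval=100, cv=5, test_size=0.2, val_size=0.2, random_state=42)
#   results = hgb.xgboost(X, y, pos_label=1, eval_metric='auc')   # two-class classification
#   results = hgb.xgboost_reg(X, y, eval_metric='rmse')           # regression
#   # results holds the best parameters, a per-evaluation summary, the hyperopt trials,
#   # the refitted best model and the scores on the independent validation set.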
def _fit(self, X, y, pos_label=None):
"""Fit the best performing model.
Description
-----------
Minimize a function over a hyperparameter space.
More realistically: *explore* a function over a hyperparameter space
according to a given algorithm, allowing up to a certain number of
function evaluations. As points are explored, they are accumulated in
"trials".
Parameters
----------
X : pd.DataFrame
Input dataset.
y : array-like.
Response variable.
pos_label : string/int.
In case of classification (_clf), the model will be fitted on the pos_label that is in y.
Returns
-------
results : dict
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
# Check input data
X, y, self.pos_label=_check_input(X, y, pos_label, self.method, verbose=self.verbose)
# Recalculate the test size: keep the same number of test samples (test_size of the full dataset), expressed as a fraction of the data that remains after removing the validation set.
if (self.val_size is not None) and (self.val_size > 0):
self.test_size = np.round((self.test_size * X.shape[0]) / (X.shape[0] - (self.val_size * X.shape[0])), 2)
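# Worked example (illustrative numbers): with 1000 samples, test_size=0.2 and val_size=0.2,
# 200 samples are set aside for validation, so the test fraction of the remaining 800 samples
# becomes round((0.2 * 1000) / (1000 - 0.2 * 1000), 2) = round(200 / 800, 2) = 0.25.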
# Print to screen
if self.verbose>=3:
print('[hgboost] >method: %s' %(self.method))
print('[hgboost] >eval_metric: %s' %(self.eval_metric))
print('[hgboost] >greater_is_better: %s' %(self.greater_is_better))
# Set validation set
self._set_validation_set(X, y)
# Find best parameters
self.model, self.results=self._HPOpt()
# Fit on all data using best parameters
if self.verbose>=3: print('[hgboost] >Retrain [%s] on the entire dataset with the optimal parameter settings.' %(self.method))
self.model.fit(X, y)
# Return
return self.results
def _classification(self, X, y, eval_metric, greater_is_better, params):
# Gather for method, the default metric and greater is better.
self.eval_metric, self.greater_is_better =_check_eval_metric(self.method, eval_metric, greater_is_better)
# Import search space for the specific function
if params == 'default': params = _get_params(self.method, eval_metric=self.eval_metric, y=y, pos_label=self.pos_label, is_unbalance=self.is_unbalance, verbose=self.verbose)
self.space = params
# Fit model
self.results = self._fit(X, y, pos_label=self.pos_label)
# Fin
if self.verbose>=3: print('[hgboost] >Fin!')
def _regression(self, X, y, eval_metric, greater_is_better, params):
# Gather for method, the default metric and greater is better.
self.eval_metric, self.greater_is_better = _check_eval_metric(self.method, eval_metric, greater_is_better)
# Import search space for the specific function
if params == 'default': params = _get_params(self.method, eval_metric=self.eval_metric, verbose=self.verbose)
self.space = params
# Fit model
self.results = self._fit(X, y)
# Fin
if self.verbose>=3: print('[hgboost] >Fin!')
def xgboost_reg(self, X, y, eval_metric='rmse', greater_is_better=False, params='default'):
"""Xgboost Regression with parameter hyperoptimization.
Parameters
----------
X : pd.DataFrame.
Input dataset.
y : array-like
Response variable.
eval_metric : str, (default : 'rmse').
Evaluation metric for the regressor model.
* 'rmse': root mean squared error.
* 'mse': mean squared error.
* 'mae': mean absolute error.
greater_is_better : bool (default : False).
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
params : dict, (default : 'default').
Hyper parameters.
Returns
-------
results : dict
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
if self.verbose>=3: print('[hgboost] >Start hgboost regression..')
# Method
self.method='xgb_reg'
# Run method
self._regression(X, y, eval_metric, greater_is_better, params)
# Return
return self.results
def lightboost_reg(self, X, y, eval_metric='rmse', greater_is_better=False, params='default'):
"""Light Regression with parameter hyperoptimization.
Parameters
----------
X : pd.DataFrame.
Input dataset.
y : array-like.
Response variable.
eval_metric : str, (default : 'rmse').
Evaluation metric for the regressor model.
* 'rmse': root mean squared error.
* 'mse': mean squared error.
* 'mae': mean absolute error.
greater_is_better : bool (default : False).
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
params : dict, (default : 'default').
Hyper parameters.
Returns
-------
results : dict
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
if self.verbose>=3: print('[hgboost] >Start hgboost regression..')
# Method
self.method='lgb_reg'
# Run method
self._regression(X, y, eval_metric, greater_is_better, params)
# Return
return self.results
def catboost_reg(self, X, y, eval_metric='rmse', greater_is_better=False, params='default'):
"""Catboost Regression with parameter hyperoptimization.
Parameters
----------
X : pd.DataFrame.
Input dataset.
y : array-like.
Response variable.
eval_metric : str, (default : 'rmse').
Evaluation metric for the regressor model.
* 'rmse': root mean squared error.
* 'mse': mean squared error.
* 'mae': mean absolute error.
greater_is_better : bool (default : False).
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
params : dict, (default : 'default').
Hyper parameters.
Returns
-------
results : dict.
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
if self.verbose>=3: print('[hgboost] >Start hgboost regression..')
# Method
self.method='ctb_reg'
# Run method
self._regression(X, y, eval_metric, greater_is_better, params)
# Return
return self.results
def xgboost(self, X, y, pos_label=None, method='xgb_clf', eval_metric=None, greater_is_better=None, params='default'):
"""Xgboost Classification with parameter hyperoptimization.
Parameters
----------
X : pd.DataFrame.
Input dataset.
y : array-like.
Response variable.
pos_label : string/int.
Fit the model on the pos_label that is in [y].
method : String, (default : 'xgb_clf').
* 'xgb_clf': XGboost two-class classifier
* 'xgb_clf_multi': XGboost multi-class classifier
eval_metric : str, (default : None).
Evaluation metric for the classification model.
* 'auc': area under ROC curve (default for two-class)
* 'kappa': (default for multi-class)
* 'f1': F1-score
* 'logloss'
* 'auc_cv': Compute average auc per iteration in each cross. This approach is computationally expensive.
greater_is_better : bool.
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
* auc : True -> two-class
* kappa : True -> multi-class
Returns
-------
results : dict.
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
if self.verbose>=3: print('[hgboost] >Start hgboost classification..')
self.method = method
self.pos_label = pos_label
# Run method
self._classification(X, y, eval_metric, greater_is_better, params)
# Return
return self.results
def catboost(self, X, y, pos_label=None, eval_metric='auc', greater_is_better=True, params='default'):
"""Catboost Classification with parameter hyperoptimization.
Parameters
----------
X : pd.DataFrame.
Input dataset.
y : array-like.
Response variable.
pos_label : string/int.
Fit the model on the pos_label that is in [y].
eval_metric : str, (default : 'auc').
Evaluation metric for the classification model.
* 'auc': area under ROC curve (default for two-class)
* 'kappa': (default for multi-class)
* 'f1': F1-score
* 'logloss'
* 'auc_cv': Compute average auc per iteration in each cross. This approach is computationally expensive.
greater_is_better : bool (default : True).
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
Returns
-------
results : dict.
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
if self.verbose>=3: print('[hgboost] >Start hgboost classification..')
self.method = 'ctb_clf'
self.pos_label = pos_label
# Run method
self._classification(X, y, eval_metric, greater_is_better, params)
# Return
return self.results
def lightboost(self, X, y, pos_label=None, eval_metric='auc', greater_is_better=True, params='default'):
"""Lightboost Classification with parameter hyperoptimization.
Parameters
----------
X : pd.DataFrame
Input dataset.
y : array-like
Response variable.
pos_label : string/int.
Fit the model on the pos_label that is in [y].
eval_metric : str, (default : 'auc')
Evaluation metric for the classification model.
* 'auc': area under ROC curve (default for two-class)
* 'kappa': (default for multi-class)
* 'f1': F1-score
* 'logloss'
* 'auc_cv': Compute average auc per iteration in each cross. This approach is computationally expensive.
greater_is_better : bool (default : True)
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
Returns
-------
results : dict
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
if self.verbose>=3: print('[hgboost] >Start hgboost classification..')
self.method = 'lgb_clf'
self.pos_label = pos_label
# Run method
self._classification(X, y, eval_metric, greater_is_better, params)
# Return
return self.results
def ensemble(self, X, y, pos_label=None, methods=['xgb_clf', 'ctb_clf', 'lgb_clf'], eval_metric=None, greater_is_better=None, voting='soft'):
"""Ensemble Classification with parameter hyperoptimization.
Description
-----------
Fit best model for xgboost, catboost and lightboost, and then combine the individual models to a new one.
Parameters
----------
X : pd.DataFrame
Input dataset.
y : array-like
Response variable.
pos_label : string/int.
Fit the model on the pos_label that is in [y].
methods : list of strings, (default : ['xgb_clf','ctb_clf','lgb_clf']).
The models included in the ensemble classifier or regressor. The clf and reg models cannot be combined.
* ['xgb_clf','ctb_clf','lgb_clf']
* ['xgb_reg','ctb_reg','lgb_reg']
eval_metric : str, (default : 'auc')
Evaluation metric for the regression or classification model.
* 'auc': area under ROC curve (two-class classification : default)
greater_is_better : bool (default : True)
If a loss, the output of the python function is negated by the scorer object, conforming to the cross validation convention that scorers return higher values for better models.
* auc : True -> two-class
voting : str, (default : 'soft')
Combining classifiers using a voting scheme.
* 'hard': using predicted classes.
* 'soft': using the predicted probabilities.
Returns
-------
results : dict
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* model: Ensemble of the best performing models.
* val_results: Results on independent validation dataset.
"""
# Store parameters in object
self.results = {}
self.voting = voting
self.methods = methods
if np.all(list(map(lambda x: 'clf' in x, methods))):
if self.verbose>=3: print('[hgboost] >Create ensemble classification model..')
self.method = 'ensemble_clf'
elif np.all(list(map(lambda x: 'reg' in x, methods))):
if self.verbose>=3: print('[hgboost] >Create ensemble regression model..')
self.method = 'ensemble_reg'
else:
raise ValueError('[hgboost] >Error: The input [methods] must be of type "_clf" or "_reg" but can not be combined.')
# Check input data
X, y, self.pos_label = _check_input(X, y, pos_label, self.method, verbose=self.verbose)
# Gather for method, the default metric and greater is better.
self.eval_metric, self.greater_is_better = _check_eval_metric(self.method, eval_metric, greater_is_better)
# Store the clean initialization in hgb
hgb = copy.copy(self)
# Create independent validation set.
if self.method == 'ensemble_clf':
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=self.val_size, random_state=self.random_state, shuffle=True, stratify=y)
else:
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=self.val_size, random_state=self.random_state, shuffle=True)
# Hyperparameter optimization for boosting models
models = []
for method in methods:
# Make copy of clean init
hgbM = copy.copy(hgb)
hgbM.method = method
hgbM._classification(X_train, y_train, eval_metric, greater_is_better, 'default')
# Store
models.append((method, copy.copy(hgbM.model)))
self.results[method] = {}
self.results[method]['model'] = copy.copy(hgbM)
# Create the ensemble model
if self.verbose>=3: print('[hgboost] >Fit ensemble model with [%s] voting..' %(self.voting))
if self.method == 'ensemble_clf':
model = VotingClassifier(models, voting=voting, n_jobs=self.n_jobs)
model.fit(X, y==pos_label)
else:
model = VotingRegressor(models, n_jobs=self.n_jobs)
model.fit(X, y)
# Store ensemble model
self.model = model
# Validation error for the ensemble model
if self.verbose>=3: print('[hgboost] >Evaluate [ensemble] model on independent validation dataset (%.0f samples, %.2g%%)' %(len(y_val), self.val_size * 100))
# Evaluate results on the same validation set
val_score, val_results = self._eval(X_val, y_val, model, verbose=2)
if self.verbose>=3: print('[hgboost] >[Ensemble] [%s]: %.4g on independent validation dataset' %(self.eval_metric, val_score['loss']))
# Validate each of the independent methods to show differences in loss-scoring
if self.val_size is not None:
self.X_val = X_val
self.y_val = y_val
for method in methods:
# Evaluation
val_score_M, val_results_M = self._eval(X_val, y_val, self.results[method]['model'].model, verbose=2)
# Store
self.results[method]['loss'] = val_score_M['loss']
self.results[method]['val_results'] = val_results_M
if self.verbose>=3: print('[hgboost] >[%s] [%s]: %.4g on independent validation dataset' %(method, self.eval_metric, val_score_M['loss']))
# Store
self.results['val_results'] = val_results
self.results['model'] = model
# self.results['summary'] = pd.concat([hgbX.results['summary'], hgbC.results['summary'], hgbL.results['summary']])
# Return
return self.results
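# A minimal ensemble usage sketch (hypothetical example; X and y are placeholder data):
#   hgb = hgboost(max_eval=25, cv=5)
#   results = hgb.ensemble(X, y, pos_label=1, methods=['xgb_clf', 'ctb_clf', 'lgb_clf'], voting='soft')
#   results['model']  # a fitted sklearn VotingClassifier built from the three tuned models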
def _set_validation_set(self, X, y):
"""Set the validation set.
Description
-----------
Here we separate a small part of the data as the validation set.
* The new data is stored in self.X and self.y
* The validation X and y are stored in self.X_val and self.y_val
"""
if self.verbose>=3: print('[hgboost] >Total dataset: %s ' %(str(X.shape)))
if (self.val_size is not None):
if '_clf' in self.method:
self.X, self.X_val, self.y, self.y_val = train_test_split(X, y, test_size=self.val_size, random_state=self.random_state, shuffle=True, stratify=y)
elif '_reg' in self.method:
self.X, self.X_val, self.y, self.y_val = train_test_split(X, y, test_size=self.val_size, random_state=self.random_state, shuffle=True)
if self.verbose>=3: print('[hgboost] >Validation set: %s ' %(str(self.X_val.shape)))
else:
self.X = X
self.y = y
self.X_val = None
self.y_val = None
def _HPOpt(self):
"""Hyperoptimization of the search space.
Description
-----------
Minimize a function over a hyperparameter space.
More realistically: *explore* a function over a hyperparameter space
according to a given algorithm, allowing up to a certain number of
function evaluations. As points are explored, they are accumulated in
"trials".
Returns
-------
model : object
Fitted model.
results : dict
* best_params: Best performing parameters.
* summary: Summary of the models with the loss and other variables.
* trials: All model results.
* model: Best performing model.
* val_results: Results on independent validation dataset.
"""
# Import the desired model-function for the classification/regression
disable = (False if (self.verbose<3) else True)
fn = getattr(self, self.method)
# Split the train-test set. This set is used for parameter optimization. Note that the parameters are varied while the train-test split is held constant.
# This way the comparison reflects differences across parameters rather than differences in train-test variance.
if '_clf' in self.method:
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=self.test_size, random_state=self.random_state, shuffle=True, stratify=self.y)
elif '_reg' in self.method:
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=self.test_size, random_state=self.random_state, shuffle=True)
if self.verbose>=3: print('[hgboost] >Train-set: %s ' %(str(self.X_train.shape)))
if self.verbose>=3: print('[hgboost] >Test-set: %s ' %(str(self.X_test.shape)))
if self.verbose>=3: print('[hgboost] >Hyperparameter optimization..')
# Hyperoptimization to find best performing model. Set the trials which is the object where all the HPopt results are stored.
trials=Trials()
best_params = fmin(fn=fn, space=self.space, algo=self.algo, max_evals=self.max_eval, trials=trials, show_progressbar=disable)
# Summary results
results_summary, model = self._to_df(trials, verbose=self.verbose)
# Cross-validation over the top n models. To speed up we can decide to further test only the best performing ones. The best performing model is returned.
if self.cv is not None:
model, results_summary, best_params = self._cv(results_summary, self.space, best_params)
# Create a basic model by using default parameters.
space_basic = {}
space_basic['fit_params'] = {'verbose': 0}
space_basic['model_params'] = {}
model_basic = getattr(self, self.method)
model_basic = fn(space_basic)['model']
comparison_results = {}
# Validation error
val_results = None
if (self.val_size is not None):
if self.verbose>=3: print('[hgboost] >Evaluate best [%s] model on validation dataset (%.0f samples, %.2g%%)' %(self.method, len(self.y_val), self.val_size * 100))
# Evaluate results
val_score, val_results = self._eval(self.X_val, self.y_val, model, verbose=2)
val_score_basic, val_results_basic = self._eval(self.X_val, self.y_val, model_basic, verbose=2)
comparison_results['Model with HyperOptimized parameters (validation set)'] = val_results
comparison_results['Model with default parameters (validation set)'] = val_results_basic
if self.verbose>=3: print('[hgboost] >[%s]: %.4g using HyperOptimized parameters on validation set.' %(self.eval_metric, val_score['loss']))
if self.verbose>=3: print('[hgboost] >[%s]: %.4g using default (not optimized) parameters on validation set.' %(self.eval_metric, val_score_basic['loss']))
# Store validation results
results_summary = _store_validation_scores(results_summary, best_params, model_basic, val_score_basic, val_score, self.greater_is_better)
# Remove the model column
del results_summary['model']
# Store
results = {}
results['params'] = best_params
results['summary'] = results_summary
results['trials'] = trials
results['model'] = model
results['val_results'] = val_results
results['comparison_results'] = comparison_results
# Return
return model, results
def _cv(self, results_summary, space, best_params):
ascending = False if self.greater_is_better else True
results_summary['loss_mean'] = np.nan
results_summary['loss_std'] = np.nan
# Determine maximum folds
top_cv_evals = np.minimum(results_summary.shape[0], self.top_cv_evals)
idx = results_summary['loss'].sort_values(ascending=ascending).index[0:top_cv_evals]
if self.verbose>=3: print('[hgboost] >%.0d-fold cross validation for the top %.0d scoring models, Total nr. tests: %.0f' %(self.cv, len(idx), self.cv * len(idx)))
disable = (True if (self.verbose==0 or self.verbose>3) else False)
# Run over the top-scoring models.
for i in tqdm(idx, disable=disable):
scores = []
# Run over the cross-validations
for k in np.arange(0, self.cv):
# Split train-test set
if '_clf' in self.method:
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=self.test_size, random_state=None, shuffle=True, stratify=self.y)
elif '_reg' in self.method:
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=self.test_size, random_state=None, shuffle=True)
# Evaluate model
score, _ = self._train_model(results_summary['model'].iloc[i], space)
score.pop('model')
scores.append(score)
# Store mean and std summary
results_summary['loss_mean'].iloc[i] =
| pd.DataFrame(scores) | pandas.DataFrame |
import re
from functools import partial
import numpy as np
import pandas as pd
from sklearn.datasets import make_moons, make_blobs
from sklearn.metrics import adjusted_rand_score
from src.types import Dataset, CutFinding, Data, Cuts, Preprocessing, CostFunction
from src.cut_finding import find_kmodes_cuts, kernighan_lin, fid_mat, binning, linear_cuts
from src.loading import make_mindsets, make_likert_questionnaire, load_RETINAL, load_CANCER, load_SBM, load_LFR
from src.plotting import plot_cuts
from src.tangles import core_algorithm
from src.tree_tangles import TangleTree, compute_soft_predictions_children, compute_hard_predictions_node
from src.utils import change_lower, change_upper, normalize
from src.cost_functions import edges_cut_cost, implicit_cost
def get_dataset(args):
"""
Function that returns the desired dataset and the order function in the format that we expect.
Datasets are always in the format of
- xs: Features that we need for clustering, like questions for the questionnaire or the adjacency matrix for
the graph
- ys: Class label
Order functions are assumed to be functions that only need a bipartition as input and return the order
of that bipartition. We assume that all other parameters are bound via partial evaluation in this function.
Parameters
----------
args: SimpleNamespace
The parameters of the dataset to load
seed: int
The seed for the RNG
Returns
-------
data: Data
the dataset in various representation
"""
if args['experiment']['dataset'] == Dataset.mindsets:
xs, ys, cs = make_mindsets(mindset_sizes=args['dataset']['mindset_sizes'],
nb_questions=args['dataset']['nb_questions'],
nb_useless=args['dataset']['nb_useless'],
noise=args['dataset']['noise'],
seed=args['experiment']['seed'])
return Data(xs=xs, ys=ys, cs=cs)
if args['experiment']['dataset'] == Dataset.questionnaire_likert:
xs, ys, cs = make_likert_questionnaire(nb_samples=args['dataset']['nb_samples'],
nb_features=args['dataset']['nb_features'],
nb_mindsets=args['dataset']['nb_mindsets'],
centers=args['dataset']['centers'],
range_answers=args['dataset']['range_answers'],
seed=args['experiment']['seed'])
return Data(xs=xs, ys=ys, cs=cs)
if args['experiment']['dataset'] == Dataset.retinal:
xs, ys = load_RETINAL(root_path=args['root_dir'],
nb_bins=args['dataset']['nb_bins'],
max_idx=args['dataset']['max_idx'])
return Data(xs=xs, ys=ys)
if args['experiment']['dataset'] == Dataset.moons:
xs, ys = make_moons(n_samples=args['dataset']['n_samples'],
noise=args['dataset']['noise'],
random_state=args['experiment']['seed'])
return Data(xs=xs, ys=ys)
if args['experiment']['dataset'] == Dataset.breast_cancer_wisconsin:
xs, ys = load_CANCER(args['dataset']['nb_bins'])
return Data(xs=xs, ys=ys)
if args['experiment']['dataset'] == Dataset.SBM:
A, ys, G = load_SBM(block_sizes=args['dataset']['block_sizes'],
p_in=args['dataset']['p'],
p_out=args['dataset']['q'],
seed=args['experiment']['seed'])
return Data(ys=ys, A=A, G=G)
if args['experiment']['dataset'] == Dataset.gaussian_mixture:
xs, ys = make_blobs(n_samples=args['dataset']['blob_sizes'],
centers=args['dataset']['blob_centers'],
n_features=args['dataset']['blob_centers'],
cluster_std=args['dataset']['blob_variances'],
random_state=args['experiment']['seed'])
return Data(xs=xs, ys=ys)
if args['experiment']['dataset'] == Dataset.LFR:
A, ys, G = load_LFR(nb_nodes=args['dataset']['nb_nodes'],
tau1=args['dataset']['tau1'],
tau2=args['dataset']['tau2'],
mu=args['dataset']['mu'],
average_degree=args['dataset']['average_degree'],
min_community=args['dataset']['min_community'],
seed=args['experiment']['seed'])
return Data(ys=ys, A=A, G=G)
if args['experiment']['dataset'] == Dataset.wave:
df = pd.read_csv('datasets/waveform.csv')
xs = df[df.columns[:-1]].to_numpy()
ys = df[df.columns[-1]].to_numpy()
return Data(xs=xs, ys=ys)
raise ValueError('Wrong name for a dataset')
def get_cuts(data, args, verbose):
"""
Given a set of points or an adjacency matrix this function returns the set of cuts that we will use
to compute tangles. If it makes sense it computes the names and equations of the cuts for better interpretability
and post-processing.
Parameters
----------
data: Data
all the input data in various representations
args: SimpleNamespace
The arguments to the program
verbose: int
the verbose level of the printing
Returns
-------
cuts: Cuts
the cuts that we will use
"""
if args['experiment']['cut_finding'] == CutFinding.features:
values = (data.xs == True).T
return Cuts(values=values)
if args['experiment']['cut_finding'] == CutFinding.binning:
values, names = binning(xs=data.xs,
range_answers=args['cut_finding']['range_answers'],
n_bins=args['cut_finding']['n_bins'])
return Cuts(values=values, names=names)
if args['experiment']['cut_finding'] == CutFinding.Kernighan_Lin:
values = kernighan_lin(A=data.A,
nb_cuts=args['cut_finding']['nb_cuts'],
lb_f=args['cut_finding']['lb_f'],
seed=args['experiment']['seed'],
verbose=verbose)
values = np.unique(values, axis=0)
return Cuts(values=values)
if args['experiment']['cut_finding'] == CutFinding.kmodes:
values = find_kmodes_cuts(xs=data.xs,
max_nb_clusters=args['cut_finding']['max_nb_clusters'])
values = np.unique(values, axis=0)
return Cuts(values=values)
if args['experiment']['cut_finding'] == CutFinding.Fiduccia_Mattheyses:
values = fid_mat(xs=data.A,
nb_cuts=args['cut_finding']['nb_cuts'],
lb_f=args['cut_finding']['lb_f'],
seed=args['experiment']['seed'],
verbose=verbose)
values = np.unique(values, axis=0)
return Cuts(values=values)
if args['experiment']['cut_finding'] == CutFinding.linear:
values, equations = linear_cuts(xs=data.xs,
equations=args['cut_finding']['equations'],
verbose=verbose)
return Cuts(values=values, equations=equations)
raise ValueError('Wrong name for a cut finding function')
def apply_preprocess(data, args):
if args['experiment']['preprocessing'] == Preprocessing.none:
return data
if args['experiment']['preprocessing'] == Preprocessing.feature_map:
raise NotImplementedError('TODO')
if args['experiment']['preprocessing'] == Preprocessing.knn_graph:
raise NotImplementedError('TODO')
if args['experiment']['preprocessing'] == Preprocessing.radius_neighbors_graph:
raise NotImplementedError('TODO')
raise ValueError('Wrong name for a preprocessing function')
def get_cost_function(data, args):
if args['experiment']['cost_function'] == CostFunction.implicit:
if data.xs is None:
raise ValueError('You need xs to compute the implicit cost function')
return partial(implicit_cost, data.xs, args['cost_function']['nb_points'])
if args['experiment']['cost_function'] == CostFunction.nb_edges_cut:
if data.A is None:
raise ValueError('You need A to compute the edge cost')
return partial(edges_cut_cost, data.A)
raise ValueError('Wrong name for a cost function')
def compute_cost_and_order_cuts(cuts, cost_function):
"""
Compute the cost of a series of cuts and order them according to their cost
Parameters
----------
cuts: Cuts
the cuts that we will consider
cost_function: function
The order function
Returns
-------
cuts: Cuts
the cuts ordered by costs
"""
cost_cuts = np.zeros(len(cuts.values), dtype=float)
for i_cut, cut in enumerate(cuts.values):
cost_cuts[i_cut] = cost_function(cut)
idx = np.argsort(cost_cuts)
cuts.values = cuts.values[idx]
cuts.costs = cost_cuts[idx]
if cuts.names is not None:
cuts.names = cuts.names[idx]
if cuts.equations is not None:
cuts.equations = cuts.equations[idx]
return cuts
def pick_cuts_up_to_order(cuts, percentile):
"""
Drop the cuts whose cost is above the given percentile.
Parameters
----------
cuts: Cuts
percentile
Returns
-------
"""
mask_orders_to_pick = cuts.costs <= np.percentile(cuts.costs, q=percentile)
cuts.costs = cuts.costs[mask_orders_to_pick]
cuts.values = cuts.values[mask_orders_to_pick, :]
if cuts.names is not None:
cuts.names = cuts.names[mask_orders_to_pick]
if cuts.equations is not None:
cuts.equations = cuts.equations[mask_orders_to_pick]
return cuts
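# Example (illustrative): with costs [1, 2, ..., 10] and percentile=40,
# np.percentile(costs, 40) = 4.6, so only the cuts with cost <= 4.6 (i.e. costs 1-4) are kept.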
def get_data_and_cuts(args):
"""
Function to load the datasets, compute the cuts and the costs.
Parameters
----------
args: SimpleNamespace
The arguments to the program
Returns
-------
data: Data
cuts: Cuts
"""
if args['verbose'] >= 2:
print("Load data\n", flush=True)
data = get_dataset(args)
if args['verbose'] >= 2:
print("Find cuts", flush=True)
cuts = get_cuts(data, args, verbose=args['verbose'])
if args['verbose'] >= 2:
print(f'\tI found {len(cuts.values)} cuts\n')
print("Compute cost", flush=True)
cost_function = get_cost_function(data, args)
cuts = compute_cost_and_order_cuts(cuts, cost_function)
cuts = pick_cuts_up_to_order(cuts,
percentile=args['experiment']['percentile_orders'])
if args['verbose'] >= 2:
max_considered_order = cuts.costs[-1]
print(f"\tI will stop at order: {max_considered_order}")
print(f'\tI will use {len(cuts.values)} cuts\n', flush=True)
if args['plot']['cuts']:
if args['verbose'] >= 2:
print(f"\tPlotting cuts")
plot_cuts(data, cuts,
nb_cuts_to_plot=args['plot']['nb_cuts'],
path=args['plot_dir'])
return data, cuts
def tangle_computation(cuts, agreement, verbose):
"""
Parameters
----------
cuts: Cuts
agreement: int
The agreement parameter
verbose:
verbosity level
Returns
-------
tangles_tree: TangleTree
The tangle search tree
"""
if verbose >= 2:
print(f"Using agreement = {agreement} \n")
print("Start tangle computation", flush=True)
tangles_tree = TangleTree()
old_order = None
unique_orders = np.unique(cuts.costs)
for order in unique_orders:
if old_order is None:
idx_cuts_order_i = np.where(cuts.costs <= order)[0]
else:
idx_cuts_order_i = np.where(np.all([cuts.costs > old_order,
cuts.costs <= order], axis=0))[0]
if len(idx_cuts_order_i) > 0:
if verbose >= 2:
print(f"\tCompute tangles of order {order} with {len(idx_cuts_order_i)} new cuts", flush=True)
cuts_order_i = cuts.values[idx_cuts_order_i]
new_tree = core_algorithm(tangles_tree=tangles_tree,
current_cuts=cuts_order_i,
idx_current_cuts=idx_cuts_order_i,
agreement=agreement)
if new_tree is None:
max_order = cuts.costs[-1]
if verbose >= 2:
print('\t\tI could not add all the new cuts')
print(f'\n\tI stopped the computation at order {old_order} instead of {max_order}', flush=True)
break
else:
tangles_tree = new_tree
if verbose >= 2:
print(f"\t\tI found {len(new_tree.active)} tangles of order {order}", flush=True)
old_order = order
if tangles_tree is not None:
tangles_tree.maximals += tangles_tree.active
return tangles_tree
def print_tangles_names(name_cuts, tangles_by_order, order_best, verbose, path):
path.mkdir(parents=True, exist_ok=True)
if verbose >= 2:
print(f'Printing answers', flush=True)
for order, tangles in tangles_by_order.items():
if len(tangles) > 0:
questions = list(tangles[0].specification.keys())
questions_names = name_cuts[questions]
answers = pd.DataFrame()
for tangle in tangles:
tmp = pd.DataFrame([tangle.specification])
answers = answers.append(tmp)
# answers = answers.astype(str)
# useless_columns = (answers.nunique(axis=0) == 1)
# answers.loc[:, useless_columns] = 'Ignore'
answers.columns = questions_names
answers.to_csv(path / f'{order:.2f}.csv', index=False)
if order == order_best:
answers.to_csv(path / '..' / 'best.csv', index=False)
def tangles_to_range_answers(tangles, cut_names, interval_values, path):
# the questions are of the form 'name greater or equal than value'
# this regex gets the name and the value
template = re.compile(r"(\w+) .+ (\d+)")
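# Example (illustrative): template.findall('age greater or equal than 30') -> [('age', '30')],
# i.e. the question name and the threshold value.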
range_answers = pd.DataFrame()
for tangle in tangles:
results = {}
for cut, orientation in tangle.specification.items():
name, value = template.findall(cut_names[cut])[0]
value = int(value)
old = results.get(name, None)
if old is None:
new = interval_values
else:
new = old
if orientation:
new = change_lower(new, value)
else:
new = change_upper(new, value - 1)
results[name] = new
range_answers = range_answers.append(pd.DataFrame([results]))
prettification = lambda i: i if i.left != i.right else i.left
convert_to_interval = lambda i:
| pd.Interval(left=i[0], right=i[1], closed='both') | pandas.Interval |
import os, unittest
import pandas as pd
import hashlib
from sqlalchemy import create_engine
from igf_data.igfdb.igfTables import Base
from igf_data.igfdb.baseadaptor import BaseAdaptor
from igf_data.utils.dbutils import read_json_data, read_dbconf_json
from igf_data.igfdb.useradaptor import UserAdaptor
class Useradaptor_test1(unittest.TestCase):
def setUp(self):
self.dbconfig='data/dbconfig.json'
dbparam=read_dbconf_json(self.dbconfig)
base=BaseAdaptor(**dbparam)
self.engine=base.engine
self.dbname=dbparam['dbname']
Base.metadata.create_all(self.engine)
self.session_class=base.get_session_class()
def tearDown(self):
Base.metadata.drop_all(self.engine)
os.remove(self.dbname)
def test_email_check(self):
ua=UserAdaptor(**{'session_class': self.session_class})
with self.assertRaises(ValueError):
ua._email_check(email='a_b.com')
self.assertFalse(ua._email_check(email='a<EMAIL>'))
def test__encrypt_password(self):
ua=UserAdaptor(**{'session_class': self.session_class})
user_data=pd.Series({'name':'AAAA','password':'<PASSWORD>'})
user_data=ua._encrypt_password(series=user_data)
user_data=user_data.to_dict()
self.assertTrue('encryption_salt' in user_data)
self.assertTrue('ht_password' in user_data)
def test_map_missing_user_status(self):
ua=UserAdaptor(**{'session_class': self.session_class})
user_data1=
| pd.Series({'name':'AAAA','hpc_username':'BBB'}) | pandas.Series |
import combine_gtfs_feeds.cli.log_controller as log_controller
import pandas as pd
import numpy as np
import os as os
import argparse
import sys
from datetime import datetime, timedelta
import time
from pathlib import Path
import zipfile
class Combined_GTFS(object):
file_list = ["agency", "trips", "stop_times", "stops", "routes", "shapes"]
def __init__(self, df_dict):
self.agency_df = df_dict["agency"]
self.routes_df = df_dict["routes"]
self.stops_df = df_dict["stops"]
self.stop_times_df = df_dict["stop_times"]
self.shapes_df = df_dict["shapes"]
self.trips_df = df_dict["trips"]
self.calendar_df = df_dict["calendar"]
def export_feed(self, dir):
dir = Path(dir)
self.agency_df.to_csv(dir / "agency.txt", index=None)
self.routes_df.to_csv(dir / "routes.txt", index=None)
self.stops_df.to_csv(dir / "stops.txt", index=None)
self.stop_times_df.to_csv(dir / "stop_times.txt", index=None)
self.shapes_df.to_csv(dir / "shapes.txt", index=None)
self.trips_df.to_csv(dir / "trips.txt", index=None)
self.calendar_df.to_csv(dir / "calendar.txt", index=None)
def add_run_args(parser, multiprocess=True):
"""
Run command args
"""
parser.add_argument(
"-g",
"--gtfs_dir",
type=str,
metavar="PATH",
help="path to GTFS dir (default: %s)" % os.getcwd(),
)
parser.add_argument(
"-s",
"--service_date",
type=int,
metavar="SERVICEDATE",
help="date for service in yyyymmdd integer format\
(default: %s)"
% os.getcwd(),
)
parser.add_argument(
"-o",
"--output_dir",
type=str,
metavar="PATH",
help="path to ourput directory (default: %s)" % os.getcwd(),
)
def get_service_ids(calendar, calendar_dates, day_of_week, service_date):
"""
Returns a list of valid service_id(s) from each feed
using the user specified service_date.
"""
regular_service_dates = calendar[
(calendar["start_date"] <= service_date)
& (calendar["end_date"] >= service_date)
& (calendar[day_of_week] == 1)
]["service_id"].tolist()
exceptions_df = calendar_dates[calendar_dates["date"] == service_date]
add_service = exceptions_df.loc[exceptions_df["exception_type"] == 1][
"service_id"
].tolist()
remove_service = exceptions_df[exceptions_df["exception_type"] == 2][
"service_id"
].tolist()
service_id_list = [
x for x in (add_service + regular_service_dates) if x not in remove_service
]
return service_id_list
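# Worked example (illustrative): for service_date=20230704, which falls on a 'tuesday',
# a service_id is kept when start_date <= 20230704 <= end_date and its tuesday column is 1,
# unless calendar_dates has a row for 20230704 with exception_type == 2 (service removed);
# a row with exception_type == 1 adds a service_id even if calendar does not cover it.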
def create_id(df, feed, id_column):
"""
Changes id_column by prepending each value with
the feed parameter.
"""
df[id_column] = feed + "_" + df[id_column].astype(str)
return df
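# Example: create_id(trips, 'feed1', 'trip_id') turns a trip_id of '123' into 'feed1_123'.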
def dt_to_yyyymmdd(dt_time):
"""
Converts a date time object to
YYYYMMDD format.
"""
return 10000 * dt_time.year + 100 * dt_time.month + dt_time.day
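# Example: a date of 2023-07-04 becomes 10000 * 2023 + 100 * 7 + 4 = 20230704.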
def get_start_end_date(my_date):
"""
Gets the day before and after
the user parameter service_date
in YYYYMMDD format.
"""
start_date = dt_to_yyyymmdd(my_date - timedelta(days=1))
end_date = dt_to_yyyymmdd(my_date + timedelta(days=1))
return start_date, end_date
def get_weekday(my_date):
"""
Gets the day of week from user parameter
service date.
"""
week_days = [
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
"sunday",
]
return week_days[my_date.weekday()]
def convert_to_seconds(value):
"""
Converts hh:mm:ss format to number
of seconds after midnight.
"""
h, m, s = value.split(":")
return int(h) * 3600 + int(m) * 60 + int(s)
def to_hhmmss(value):
"""
Converts to hhmmss format.
"""
return time.strftime("%H:%M:%S", time.gmtime(value))
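# Example round trip: convert_to_seconds('07:30:15') -> 7*3600 + 30*60 + 15 = 27015,
# and to_hhmmss(27015) -> '07:30:15'. Note that values of 86400 or more wrap past
# midnight because time.gmtime folds them into the next day.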
def frequencies_to_trips(frequencies, trips, stop_times):
"""
For each trip_id in frequencies.txt, calculates the number
of trips and creates records for each trip in trips.txt and
stop_times.txt. Deletes the original representative trip_id
in both of these files.
"""
# some feeds will use the same trip_id for multiple rows
# need to create a unique id for each row
frequencies["frequency_id"] = frequencies.index
frequencies["start_time_secs"] = frequencies["start_time"].apply(convert_to_seconds)
frequencies["end_time_secs"] = frequencies["end_time"].apply(convert_to_seconds)
# following is coded so the total number of trips
# does not include a final one that leaves the first
# stop at end_time in frequencies. I think this is the
# correct interpretation of the field description:
# 'Time at which service changes to a different headway
# (or ceases) at the first stop in the trip.'
# Rounding total trips to make sure all trips are counted
# when end time is in the following format: 14:59:59,
# instead of 15:00:00.
frequencies["total_trips"] = (
(
(
(frequencies["end_time_secs"] - frequencies["start_time_secs"])
/ frequencies["headway_secs"]
)
)
.round(0)
.astype(int)
)
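# Worked example (illustrative): start_time 06:00:00 (21600 s), end_time 07:00:00 (25200 s)
# and headway_secs 600 give total_trips = round((25200 - 21600) / 600) = 6, i.e. departures
# at 06:00, 06:10, ..., 06:50 but not at 07:00, matching the interpretation described above.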
trips_update = trips.merge(frequencies, on="trip_id")
trips_update = trips_update.loc[
trips_update.index.repeat(trips_update["total_trips"])
].reset_index(drop=True)
trips_update["counter"] = trips_update.groupby("trip_id").cumcount() + 1
trips_update["trip_id"] = (
trips_update["trip_id"].astype(str) + "_" + trips_update["counter"].astype(str)
)
stop_times_update = frequencies.merge(stop_times, on="trip_id", how="left")
stop_times_update["arrival_time_secs"] = stop_times_update["arrival_time"].apply(
convert_to_seconds
)
stop_times_update["departure_time_secs"] = stop_times_update[
"departure_time"
].apply(convert_to_seconds)
stop_times_update["elapsed_time"] = stop_times_update.groupby(
["trip_id", "start_time"]
)["arrival_time_secs"].transform("first")
stop_times_update["elapsed_time"] = (
stop_times_update["arrival_time_secs"] - stop_times_update["elapsed_time"]
)
stop_times_update["arrival_time_secs"] = (
stop_times_update["start_time_secs"] + stop_times_update["elapsed_time"]
)
# for now assume departure time is the same as arrival time.
stop_times_update["departure_time_secs"] = (
stop_times_update["start_time_secs"] + stop_times_update["elapsed_time"]
)
stop_times_update = stop_times_update.loc[
stop_times_update.index.repeat(stop_times_update["total_trips"])
].reset_index(drop=True)
# handles the case of repeated trip_ids
stop_times_update["counter"] = stop_times_update.groupby(
["frequency_id", "stop_id"]
).cumcount()
stop_times_update["departure_time_secs"] = stop_times_update[
"departure_time_secs"
] + (stop_times_update["counter"] * stop_times_update["headway_secs"])
stop_times_update["arrival_time_secs"] = stop_times_update["arrival_time_secs"] + (
stop_times_update["counter"] * stop_times_update["headway_secs"]
)
# now we want to get the cumcount based on trip_id
stop_times_update["counter"] = (
stop_times_update.groupby(["trip_id", "stop_id"]).cumcount() + 1
)
stop_times_update["departure_time"] = stop_times_update[
"departure_time_secs"
].apply(to_hhmmss)
stop_times_update["arrival_time"] = stop_times_update["arrival_time_secs"].apply(
to_hhmmss
)
stop_times_update["trip_id"] = (
stop_times_update["trip_id"].astype(str)
+ "_"
+ stop_times_update["counter"].astype(str)
)
# remove trip_ids that are in frequencies
stop_times = stop_times[~stop_times["trip_id"].isin(frequencies["trip_id"])]
trips = trips[~trips["trip_id"].isin(frequencies["trip_id"])]
# get rid of some columns
stop_times_update = stop_times_update[stop_times.columns]
trips_update = trips_update[trips.columns]
# add new trips/stop times
trips = pd.concat([trips, trips_update])
stop_times = pd.concat([stop_times, stop_times_update])
return trips, stop_times
def read_gtfs(path, gtfs_file_name, is_zipped, empty_df_cols=[]):
if is_zipped:
zf = zipfile.ZipFile(path.with_suffix(".zip"))
try:
df = pd.read_csv(zf.open(gtfs_file_name))
except:
df =
| pd.DataFrame(columns=empty_df_cols) | pandas.DataFrame |
"""Pandas DataFrame↔Table conversion helpers"""
import numpy as np
import pandas as pd
from pandas.api.types import (
is_categorical_dtype, is_object_dtype,
is_datetime64_any_dtype, is_numeric_dtype,
)
from Orange.data import (
Table, Domain, DiscreteVariable, StringVariable, TimeVariable,
ContinuousVariable,
)
__all__ = ['table_from_frame', 'table_to_frame']
def table_from_frame(df, *, force_nominal=False):
"""
Convert pandas.DataFrame to Orange.data.Table
Parameters
----------
df : pandas.DataFrame
force_nominal : boolean
If True, interpret ALL string columns as nominal (DiscreteVariable).
Returns
-------
Table
"""
def _is_discrete(s):
return (is_categorical_dtype(s) or
is_object_dtype(s) and (force_nominal or
s.nunique() < s.size**.666))
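# Heuristic example (illustrative): an object column with 1000 rows is treated as discrete
# only when it has fewer than 1000 ** 0.666, i.e. roughly 100, distinct values
# (or always, when force_nominal is True).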
def _is_datetime(s):
if is_datetime64_any_dtype(s):
return True
try:
if is_object_dtype(s):
pd.to_datetime(s, infer_datetime_format=True)
return True
except Exception: # pylint: disable=broad-except
pass
return False
# If df index is not a simple RangeIndex (or similar), put it into data
if not (df.index.is_integer() and (df.index.is_monotonic_increasing or
df.index.is_monotonic_decreasing)):
df = df.reset_index()
attrs, metas = [], []
X, M = [], []
# Iter over columns
for name, s in df.items():
name = str(name)
if _is_discrete(s):
discrete = s.astype('category').cat
attrs.append(DiscreteVariable(name, discrete.categories.astype(str).tolist()))
X.append(discrete.codes.replace(-1, np.nan).values)
elif _is_datetime(s):
tvar = TimeVariable(name)
attrs.append(tvar)
s = pd.to_datetime(s, infer_datetime_format=True)
X.append(s.astype('str').replace('NaT', np.nan).map(tvar.parse).values)
elif is_numeric_dtype(s):
attrs.append(ContinuousVariable(name))
X.append(s.values)
else:
metas.append(StringVariable(name))
M.append(s.values.astype(object))
return Table.from_numpy(Domain(attrs, None, metas),
np.column_stack(X) if X else np.empty((df.shape[0], 0)),
None,
np.column_stack(M) if M else None)
def table_to_frame(tab, include_metas=False):
"""
Convert Orange.data.Table to pandas.DataFrame
Parameters
----------
tab : Table
include_metas : bool, (default=False)
Include table metas into dataframe.
Returns
-------
pandas.DataFrame
"""
def _column_to_series(col, vals):
result = ()
if col.is_discrete:
codes = pd.Series(vals).fillna(-1).astype(int)
result = (col.name, pd.Categorical.from_codes(codes=codes, categories=col.values,
ordered=col.ordered))
elif col.is_time:
result = (col.name, pd.to_datetime(vals, unit='s').to_series().reset_index()[0])
elif col.is_continuous:
dt = float
# np.nan are not compatible with int column
nan_values_in_column = [t for t in vals if np.isnan(t)]
if col.number_of_decimals == 0 and len(nan_values_in_column) == 0:
dt = int
result = (col.name, pd.Series(vals).astype(dt))
elif col.is_string:
result = (col.name, pd.Series(vals))
return result
def _columns_to_series(cols, vals):
return [_column_to_series(col, vals[:, i]) for i, col in enumerate(cols)]
x, y, metas = [], [], []
domain = tab.domain
if domain.attributes:
x = _columns_to_series(domain.attributes, tab.X)
if domain.class_vars:
y_values = tab.Y.reshape(tab.Y.shape[0], len(domain.class_vars))
y = _columns_to_series(domain.class_vars, y_values)
if domain.metas:
metas = _columns_to_series(domain.metas, tab.metas)
all_series = dict(x + y + metas)
all_vars = tab.domain.variables
if include_metas:
all_vars += tab.domain.metas
original_column_order = [var.name for var in all_vars]
unsorted_columns_df =
| pd.DataFrame(all_series) | pandas.DataFrame |
import glob
import os
import shutil
import tempfile
import pandas as pd
import pytest
from click.testing import CliRunner
from sadie import app
from sadie.reference import Reference
from sadie.reference.reference import G3Error
known_aux_exceptions = {
("mouse", "imgt", "IGLJ4*01"): "igblast has wrong number of c-term remaining",
("rabbit", "imgt", "IGHJ3*02"): "igblast has wrong reading frame",
}
def test_reference_class():
"""Test if we can JIT reference class."""
ref_class = Reference()
ref_class.add_gene({"species": "human", "gene": "IGHV1-69*01", "database": "imgt"})
ref_class.add_gene({"species": "human", "gene": "IGHD3-3*01", "database": "imgt"})
with pytest.raises(G3Error):
ref_class.add_gene({"species": "human", "gene": "IGHV111-69*01", "database": "imgt"})
assert len(ref_class.get_dataframe()) == 2
def test_load_ref_from_df(fixture_setup):
ref_class = Reference.read_file(fixture_setup.get_reference_dataset_csv())
assert ref_class.data
def test_make_reference_class_from_yaml():
"""Test reference class."""
ref_class = Reference.parse_yaml()
assert isinstance(ref_class, Reference)
ref_class_data = ref_class.get_dataframe()
assert isinstance(ref_class_data, pd.DataFrame)
def _test_auxilary_file_structure(tmpdir, fixture_setup):
# Send aux and internal to compare against IGBLAST internal data and aux data
my_aux = []
generated_aux_path_files = glob.glob(f"{tmpdir}/*/**/*.aux", recursive=True)
for file in generated_aux_path_files:
df = pd.read_csv(
file,
skip_blank_lines=True,
delimiter="\t",
header=None,
names=["gene", "reading_frame", "segment", "cdr3_end", "left_over"],
)
df.insert(0, "common", os.path.basename(file).split("_")[0])
df.insert(1, "db_type", file.split("/")[-3])
my_aux.append(df)
my_aux = pd.concat(my_aux).reset_index(drop=True).set_index(["common", "db_type", "gene"])
igblast_aux = []
igblast_aux_files = fixture_setup.get_aux_files()
for file in igblast_aux_files:
df = pd.read_csv(
file,
skip_blank_lines=True,
delim_whitespace=True,
skiprows=2,
header=None,
names=["gene", "reading_frame", "segment", "cdr3_end", "left_over"],
)
df.insert(0, "common", os.path.basename(file).split("_")[0])
df.insert(1, "db_type", "imgt")
igblast_aux.append(df)
igblast_aux = pd.concat(igblast_aux).reset_index(drop=True).set_index(["common", "db_type", "gene"])
common_index = my_aux.index.intersection(igblast_aux.index)
# sadie_missing_aux = igblast_aux.index.difference(my_aux.index)
# igblast_missing_sadie = my_aux.index.difference(igblast_aux.index)
my_aux_common_index = my_aux.loc[common_index]
igblast_common_index = igblast_aux.loc[common_index]
for index in my_aux_common_index.index:
try:
pd._testing.assert_series_equal(igblast_common_index.loc[index], my_aux_common_index.loc[index])
except AssertionError:
if index in known_aux_exceptions.keys():
print(
index,
"is known exception exception",
known_aux_exceptions[index],
"skipping",
)
continue
else:
# raise again since pandas gives way better info
pd._testing.assert_series_equal(
igblast_common_index.loc[index],
my_aux_common_index.loc[index],
obj=index,
)
return True
def _test_internal_data_file_structure(tmpdir, fixture_setup):
# what we have made
internal_path = glob.glob(f"{tmpdir}/imgt/**/*.imgt", recursive=True)
reference_internal_path = fixture_setup.get_internal_files()
my_internal_path_df = []
for file in internal_path:
df = pd.read_csv(
file,
skip_blank_lines=True,
delimiter="\t",
header=None,
names=[
"gene",
"fwr1_start",
"fwr1_end",
"cdr1_start",
"cdr1_end",
"fwr2_start",
"fwr2_end",
"cdr2_start",
"cdr2_end",
"fwr3_start",
"fwr3_end",
"segment",
"weird",
],
)
df.insert(0, "common", os.path.basename(file).split(".ndm")[0])
df.insert(1, "db_type", "imgt")
my_internal_path_df.append(df)
my_internal_path_df = (
| pd.concat(my_internal_path_df) | pandas.concat |
import unittest
import pandas as pd
import analysis_tools as analysis
import matplotlib.pyplot as plt
class TestStringMethods(unittest.TestCase):
def setUp(self):
xs = [10, 30, 110, -20, 50, 50, 30, 80, 99, 10]
ys = [20, 60, 110, -40, 100, 100, 60, 160, 200, 20]
ts = [0, 1000, 2000, 3000, 8000, 9000, 10000, 11000, 12000, 13000]
self.dirty_tracking_data =
| pd.DataFrame({"x": xs, "y": ys, "t": ts}) | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, date_range, offsets
import pandas._testing as tm
class TestDataFrameShift:
def test_shift(self, datetime_frame, int_frame):
# naive shift
shiftedFrame = datetime_frame.shift(5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
shiftedFrame = datetime_frame.shift(-5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(-5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
# shift by 0
unshifted = datetime_frame.shift(0)
tm.assert_frame_equal(unshifted, datetime_frame)
# shift by DateOffset
shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(datetime_frame)
shiftedFrame2 = datetime_frame.shift(5, freq="B")
tm.assert_frame_equal(shiftedFrame, shiftedFrame2)
d = datetime_frame.index[0]
shifted_d = d +
| offsets.BDay(5) | pandas.offsets.BDay |
"""
Dynamically rebalance the net asset values (NAVs) of multiple strategies.
"""
import pandas as pd
import numpy as np
from .dealutils import calDrawdown
class Rebalance(object):
"""
"""
def __init__(self, dailyReturnDF, diff):
"""
:param dailyReturnDF: daily returns of multiple strategies, columns = [parameter names], index = dates, values = daily returns
"""
self.dailyReturnDF = dailyReturnDF
self.diff = diff
self.navDF = None # combined NAV curves, columns = [param name 1, param name 2, ..., average]
self.drawndown = None # drawdown computed on the combined NAV curves
def run(self):
"""
Perform the dynamic rebalancing.
:return:
"""
# Rebalance
self.reBalance()
# Compute the drawdown after rebalancing
self.calDrawdown()
def reBalance(self):
"""
Perform the rebalancing.
:return:
"""
dailyReturnDF = self.dailyReturnDF
diff = 1 - self.diff
nav =
| pd.Series(1, index=dailyReturnDF.columns) | pandas.Series |
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
import math
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day
from qteasy.utilfuncs import next_market_trade_day, unify, mask_to_signal, list_or_slice, labels_to_dict
from qteasy.utilfuncs import weekday_name, prev_market_trade_day, is_number_like, list_truncate, input_to_list
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator, TimingDMA, TimingMACD, TimingCDL, TimingTRIX
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic, stock_company
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.history import stack_dataframes, dataframe_to_hp, HistoryPanel
from qteasy.database import DataSource
from qteasy.strategy import Strategy, SimpleTiming, RollingTiming, SimpleSelecting, FactoralSelecting
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
from qteasy.blender import _exp_to_token, blender_parser, signal_blend
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000., 20000., 10000.])
self.op = np.array([0., 1., -0.33333333])
self.amounts_to_sell = np.array([0., 0., -3333.3333])
self.cash_to_spend = np.array([0., 20000., 0.])
self.prices = np.array([10., 20., 10.])
self.r = qt.Cost(0.0)
def test_rate_creation(self):
"""测试对象生成"""
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
self.assertEqual(self.r.buy_fix, 0)
self.assertEqual(self.r.sell_fix, 0)
def test_rate_operations(self):
"""测试交易费率对象"""
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r.calculate(self.amounts),
[0.003, 0.003, 0.003]),
True,
'fee calculation wrong')
def test_rate_fee(self):
"""测试买卖交易费率"""
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 0.
self.r.sell_min = 0.
self.r.slipage = 0.
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1.))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
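    # Hedged arithmetic check (not from the source): the expected numbers above are
    # consistent with a plain proportional-fee model, assuming a purchase buys
    # shares = cash / (price * (1 + buy_rate)) and a sale pays fee = value * sell_rate:
    #   sell 3333.3333 shares @ 10 -> value 33333.333, fee 33.333333, cash in 33299.999667
    #   buy with 20000 cash @ 20   -> shares 20000 / (20 * 1.003) = 997.00897308,
    #                                 fee 997.00897308 * 20 * 0.003 = 59.8205384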
def test_min_fee(self):
"""测试最低交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
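    # Hedged arithmetic check (not from the source): with all rates at zero the
    # minimum fee appears to be subtracted directly from the trade:
    #   buy  -> shares = (20000 - 300) / 20 = 985 (moq = 0)
    #   sell -> cash in = 3333.3333 * 10 - 300 = 33033.333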
def test_rate_with_min(self):
"""测试最低交易费用对其他交易费率参数的影响"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
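    # Hedged arithmetic check (not from the source): with buy_rate = 0.0153 the
    # proportional fee exceeds the minimum of 300, so shares = 20000 / (20 * 1.0153)
    # = 984.9305624 and fee = 984.9305624 * 20 * 0.0153 = 301.389; on the sell side
    # 33333.333 * 0.01 = 333.333 also exceeds the minimum of 333, so the rate applies.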
def test_fixed_fee(self):
"""测试固定交易费用"""
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
                      f'result incorrect, {test_fixed_fee_result[0]} does not equal [0, 0, -3300]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
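    # Hedged arithmetic check (not from the source): a fixed fee appears to be taken
    # straight off the cash leg of each trade:
    #   buy  -> shares = (20000 - 200) / 20 = 990 (moq = 0)
    #   sell -> cash in = 3333.3333 * 10 - 150 = 33183.333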
def test_slipage(self):
"""测试交易滑点"""
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
        print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 0:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 0))
        print('\npurchase result of fixed rate = 0.003 and slipage = 1E-9 and moq = 100:')
        print(self.r.get_purchase_result(self.prices, self.cash_to_spend, 100))
        print('\nselling result with fixed rate = 0.001 and slipage = 1E-9:')
print(self.r.get_selling_result(self.prices, self.amounts_to_sell))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.amounts_to_sell)
        self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
                      f'{test_fixed_fee_result[0]} does not equal [0, 0, -3333.3333]')
        self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
                               msg=f'{test_fixed_fee_result[1]} does not equal 33298.88855591.')
        self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
                               msg=f'{test_fixed_fee_result[2]} does not equal 34.44444409.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.cash_to_spend, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
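    # Hedged summary (not from the source): the assertions above are consistent with
    # the following type-inference rule when par_types is None or unrecognised:
    #   a 2-tuple containing a float -> 'conti', a 2-tuple of two ints -> 'discr',
    #   and any longer tuple (or an unknown type name) -> 'enum'.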
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
it = zip(extracted_int_list4, [(0, 10), (1, 'c'), (0, 'b'), (1, 14)])
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
        # all the points have been extracted; build ten subspaces around 10 of them
        # check that each subspace is a Space and lies inside s; extract a point set with 32 and verify the count is correct
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
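    # Hedged arithmetic check (not from the source): the interval extraction above
    # steps each (0, 10) axis by 3, giving the grid values 0, 3, 6, 9 on every axis,
    # hence 4 * 4 = 16 points for two axes and 4 ** 6 = 4096 for the six-axis space.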
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
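# Hedged arithmetic check (not from the source): from_point() appears to build the
# sub-space as [p - d, p + d] on each axis, clipped to the parent bounds, e.g.
# p = (3, 3) with distance 2 inside [(0., 10), (0, 10)] gives (1, 5) on both axes,
# and p[0] = 15 with distance 10 inside (10, 250) clips to (10, 25).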
class TestCashPlan(unittest.TestCase):
def setUp(self):
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
        # test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
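    # Hedged arithmetic check (not from the source): cp1.closing_value == 34200 is
    # consistent with compounding each investment annually at ir = 0.1 up to the last
    # investment date: 20000 * 1.1 ** 2 + 10000 = 24200 + 10000 = 34200.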
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
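# Hedged sketch (not from the source): the behaviour asserted above matches a simple
# "keep the best N" pool. A minimal stand-in for in_pool()/cut(), with illustrative
# names only (`pool_cut_sketch` is not part of qteasy):
def pool_cut_sketch(items, perfs, capacity=5, keep_largest=True):
    # indices sorted (stably) by performance, ascending
    order = sorted(range(len(perfs)), key=lambda i: perfs[i])
    keep = order[-capacity:] if keep_largest else order[:capacity]
    return [items[i] for i in keep], [perfs[i] for i in keep]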
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
print('\ntest multiple diameters:')
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
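    # Hedged arithmetic check (not from the source): space_around_centre() appears to
    # take centre ± radius on each axis and clip the result to the parent space:
    # 5.5 ± 1.2 -> (4.3, 6.7), 3.2 ± 1.2 -> (2.0, 4.4), and with radius 3.9 the third
    # axis clips at the upper bound, 7 ± 3.9 -> (3.1, 10.0).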
def test_get_stock_pool(self):
print(f'start test building stock pool function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if all stock market is "主板", and list date no later than "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if all list dates are no later than "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
              f'check if list date, industry and area all match the given filters\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
    # the manually computed reference results below are kept in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
        # build a test series of 500 data points to exercise evaluation when there are more than 250 points
self.long_data = pd.DataFrame([9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4, 10.87,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19, 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97,
12.178, 11.95, 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3, 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82, 12.67, 12.876, 12.986, 13.271, 13.606, 13.82,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34, 12.141, 11.687,
11.992, 12.458, 12.131, 11.75, 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56, 12.879, 12.861,
12.973, 13.235, 13.53, 13.531, 13.137, 13.166, 13.31, 13.103,
13.007, 12.643, 12.69, 12.216, 12.385, 12.046, 12.321, 11.9,
11.772, 11.816, 11.871, 11.59, 11.518, 11.94, 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16, 11.741, 11.26, 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62, 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89,
10.728, 11.191, 11.646, 11.62, 11.195, 11.178, 11.18, 10.956,
11.205, 10.87, 11.098, 10.639, 10.487, 10.507, 10.92, 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77, 11.225, 10.92, 10.824, 11.096, 11.542,
11.06, 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55, 9.008,
9.138, 9.088, 9.434, 9.156, 9.65, 9.431, 9.654, 10.079,
10.411, 10.865, 10.51, 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72, 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11, 13.53,
13.123, 13.138, 13.57, 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86, 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11, 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32, 16.59, 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06, 17.36, 17.108,
17.348, 17.596, 17.46, 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67, 15.911,
16.077, 16.17, 15.722, 15.258, 14.877, 15.138, 15., 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71, 16.327, 16.605, 16.486, 16.846,
16.935, 17.21, 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43, 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([9.7, 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59, 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97, 14.228,
13.84, 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41, 14.74, 15.03, 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86, 15.097, 15.178, 15.293, 15.238, 15., 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81, 17.192, 16.86, 16.745, 16.707,
16.552, 16.133, 16.301, 16.08, 15.81, 15.75, 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57, 16.778, 16.928, 16.932, 17.22, 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36, 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79, 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72, 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12, 15.442, 15.476, 15.789,
15.36, 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2, 15.994, 15.86, 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49, 17.768, 17.509,
17.795, 18.147, 18.63, 18.945, 19.021, 19.518, 19.6, 19.744,
19.63, 19.32, 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3,
17.894, 17.744, 17.5, 17.083, 17.092, 16.864, 16.453, 16.31,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93, 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67, 14.797, 14.42, 14.681, 15.16, 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71, 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39, 11.723, 12.084, 11.8, 11.471,
11.33, 11.504, 11.295, 11.3, 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94,
10.521, 10.36, 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72, 10.54, 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54, 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39, 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4,
9.332, 9.34, 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63, 8.831, 8.957, 9.18, 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85, 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06, 10.188, 10.095, 9.739, 9.881,
9.7, 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
        print('test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
        print('Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
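        # Note (assumption, inferred from the assertions above rather than from the implementation):
        # eval_fv() appears to return the final value of the 'value' column, -np.inf for an empty
        # DataFrame, and to require a proper DataFrame carrying a 'value' column.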
def test_max_drawdown(self):
        print('test with test data and empty DataFrame')
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
        print('Error testing')
        self.assertRaises(AssertionError, eval_max_drawdown, 15)
        self.assertRaises(KeyError,
                          eval_max_drawdown,
                          pd.DataFrame([1, 2, 3], columns=['non_value']))
        # test max drawdown == 0:
        # TODO: investigate how the division by zero affects the result when the drawdown base is zero
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
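        # Note (assumption, inferred from the assertions above): eval_max_drawdown() appears to
        # return a tuple (max_drawdown, peak_index, valley_index, recovery_index), with
        # max_drawdown = (peak - valley) / peak on the 'value' column and a NaN recovery index
        # whenever the series never regains its previous peak.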
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
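        # Note (assumption): the information ratio checked above is presumably computed as
        # mean(active_return) / std(active_return), where active_return is the per-period return
        # of the tested series minus that of the reference series.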
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
        # Test volatility calculation on the long data series
expected_volatility = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514,
0.40710639, 0.40708157, 0.40609006, 0.4073625, 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593, 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768, 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592, 0.42615335, 0.42526286,
0.4248906, 0.42368986, 0.4232565, 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645, 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991, 0.405011, 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969, 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559, 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634, 0.36539259, 0.36428672, 0.36502487,
0.3647148, 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685, 0.33883381])
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
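        # A minimal sketch of the rolling volatility presumably validated here (an assumption
        # inferred from the 250 leading NaNs and the annualized magnitudes, not taken from the
        # actual implementation):
        #   ret = np.log(self.long_data['value']).diff()          # log returns when logarithm=True
        #   rolling_vol = ret.rolling(250).std() * np.sqrt(250)   # annualized over ~250 trading days
        # eval_volatility() is then compared against the mean of the non-NaN rolling values.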
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
        # Test Sharpe ratio calculation on the long data series
expected_sharp = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281, -0.02416067, -0.02763238,
-0.027579, -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633, -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756, -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977,
0.0474047, 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686, 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441, 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544, 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123, 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174, 0.05051288, 0.0564852, 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782, 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908, 0.08562706,
0.0839014, 0.0849072, 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
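        # Note (assumption): the expected Sharpe series looks like a rolling
        # (mean(ret) - risk_free) / std(ret) over a 250-day window, which explains the 250 leading
        # NaNs; eval_sharp() is compared against the mean of the rolling values.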
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
        # Test beta calculation on the long data series
expected_beta = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347, -0.0460858, -0.0416761, -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841, -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915, -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592, -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058, -0.04533641, -0.0461183, -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265, -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383, -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499, -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632, -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486, -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195, -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
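        # Note (assumption): beta appears to be cov(ret, ret_bench) / var(ret_bench) computed on a
        # rolling 250-day window of the daily returns of self.long_data and self.long_bench;
        # eval_beta() is compared against the mean of that rolling series.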
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
        # Test alpha calculation on the long data series
expected_alpha = np.array([np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565, -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789, -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945, -0.04672356, -0.03581408, -0.0439215,
-0.03429495, -0.0260362, -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908, 0.11302115,
0.0909566, 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445, 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807, 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069, 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612, 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336, 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809, 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061, 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356, 0.70912003,
0.60328917, 0.6395092, 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216, 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253, 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
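        # Note (assumption): alpha appears to follow the CAPM form
        #   alpha = annualized_return - (risk_free + beta * (benchmark_return - risk_free)),
        # evaluated on a rolling basis; eval_alpha() is compared against the mean of the rolling values.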
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
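        # Note (assumption): eval_benchmark() evidently evaluates the reference series itself,
        # returning its (total_return, annualized_return); that is why every call above yields the
        # same pair no matter which test_data set is passed in.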
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
        # Carefully designed simulated share names, trading dates, and share prices
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
        # Carefully designed simulated PT (position target) holding signals:
self.pt_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.250, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.200, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.100, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.150],
[0.133, 0.200, 0.050, 0.000, 0.062, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.133, 0.200, 0.050, 0.000, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.050, 0.150, 0.262, 0.100, 0.000],
[0.066, 0.200, 0.250, 0.150, 0.000, 0.300, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.386, 0.136, 0.170, 0.102, 0.000, 0.204, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.460, 0.119, 0.149, 0.089, 0.000, 0.179, 0.000],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.446, 0.116, 0.145, 0.087, 0.000, 0.087, 0.116],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.400, 0.208, 0.130, 0.078, 0.000, 0.078, 0.104],
[0.370, 0.193, 0.120, 0.072, 0.072, 0.072, 0.096],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.000, 0.222, 0.138, 0.222, 0.083, 0.222, 0.111],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.121, 0.195, 0.121, 0.195, 0.073, 0.195, 0.097],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.200, 0.320, 0.200, 0.000, 0.120, 0.000, 0.160],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.047, 0.380, 0.238, 0.000, 0.142, 0.000, 0.190],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.043, 0.434, 0.217, 0.000, 0.130, 0.000, 0.173],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.045, 0.454, 0.227, 0.000, 0.000, 0.000, 0.272],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.050, 0.000, 0.250, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300],
[0.000, 0.000, 0.400, 0.000, 0.000, 0.000, 0.300]])
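        # Note (assumption): each PT row appears to hold the target portfolio weight of every share
        # on that date (fractions of total value), so the loop is expected to rebalance the holdings
        # toward these targets whenever they change.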
        # Carefully designed simulated PS (proportional) trading signals, closely mirroring the PT signals above
self.ps_signals = np.array([[0.000, 0.000, 0.000, 0.000, 0.250, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.100, 0.150],
[0.200, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.100, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -0.750, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.333, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, -0.500, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, -1.000],
[0.000, 0.000, 0.000, 0.000, 0.200, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000, 0.150, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.200, 0.000, -1.000, 0.200, 0.000],
[0.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.200, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, -0.500, 0.200],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.200, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.150, 0.000, 0.000],
[-1.000, 0.000, 0.000, 0.250, 0.000, 0.250, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.250, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, -1.000, 0.000, -1.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-0.800, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.100, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, -1.000, 0.000, 0.100],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, -1.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[-1.000, 0.000, 0.150, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000]])
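        # Note (assumption): PS entries appear to be proportional trade signals: a positive value
        # presumably buys with that fraction of the portfolio, while a negative value sells that
        # fraction of the current holding (e.g. -1.000 liquidates the position).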
        # Carefully designed simulated VS share-trading signals, similar to the PS signals above
self.vs_signals = np.array([[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 300, 300],
[400, 400, 000, 000, 000, 000, 000],
[000, 000, 250, 000, 000, 000, 000],
[000, 000, 000, 000, -400, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, -200, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, -300],
[000, 000, 000, 000, 500, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 000, 300, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 400, 000, -300, 600, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[600, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, -400, 600],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 500, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 300, 000, 000],
[-500, 000, 000, 500, 000, 200, 000],
[000, 000, 000, 000, 000, 000, 000],
[500, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, -700, 000, -600, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-400, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, -600, 000, 300],
[000, 000, 000, 000, 000, 000, 000],
[000, -300, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[-200, 000, 700, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000],
[000, 000, 000, 000, 000, 000, 000]])
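        # Note (assumption): VS entries appear to be absolute share volumes, with positive numbers
        # giving the number of shares to buy and negative numbers the number of shares to sell.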
        # Carefully designed simulated multi-price trading signals covering 50 trading days of operations on three shares
self.multi_shares = ['000010', '000030', '000039']
self.multi_dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08']
self.multi_dates = [pd.Timestamp(date_text) for date_text in self.multi_dates]
        # The trading prices used for the operations include the open, high, and close prices
self.multi_prices_open = np.array([[10.02, 9.88, 7.26],
[10.00, 9.88, 7.00],
[9.98, 9.89, 6.88],
[9.97, 9.75, 6.91],
[9.99, 9.74, np.nan],
[10.01, 9.80, 6.81],
[10.04, 9.62, 6.63],
[10.06, 9.65, 6.45],
[10.06, 9.58, 6.16],
[10.11, 9.67, 6.24],
[10.11, 9.81, 5.96],
[10.07, 9.80, 5.97],
[10.06, 10.00, 5.96],
[10.09, 9.95, 6.20],
[10.03, 10.10, 6.35],
[10.02, 10.06, 6.11],
[10.06, 10.14, 6.37],
[10.08, 9.90, 5.58],
[9.99, 10.20, 5.65],
[10.00, 10.29, 5.65],
[10.03, 9.86, 5.19],
[10.02, 9.48, 5.42],
[10.06, 10.01, 6.30],
[10.03, 10.24, 6.15],
[9.97, 10.26, 6.05],
[9.94, 10.24, 5.89],
[9.83, 10.12, 5.22],
[9.78, 10.65, 5.20],
[9.77, 10.64, 5.07],
[9.91, 10.56, 6.04],
[9.92, 10.42, 6.12],
[9.97, 10.43, 5.85],
[9.91, 10.29, 5.67],
[9.90, 10.30, 6.02],
[9.88, 10.44, 6.04],
[9.91, 10.60, 7.07],
[9.63, 10.67, 7.64],
[9.64, 10.46, 7.99],
[9.57, 10.39, 7.59],
[9.55, 10.90, 8.73],
[9.58, 11.01, 8.72],
[9.61, 11.01, 8.97],
[9.62, np.nan, 8.58],
[9.55, np.nan, 8.71],
[9.57, 10.82, 8.77],
[9.61, 11.02, 8.40],
[9.63, 10.96, 7.95],
[9.64, 11.55, 7.76],
[9.61, 11.74, 8.25],
[9.56, 11.80, 7.51]])
self.multi_prices_high = np.array([[10.07, 9.91, 7.41],
[10.00, 10.04, 7.31],
[10.00, 9.93, 7.14],
[10.00, 10.04, 7.00],
[10.03, 9.84, np.nan],
[10.03, 9.88, 6.82],
[10.04, 9.99, 6.96],
[10.09, 9.70, 6.85],
[10.10, 9.67, 6.50],
[10.14, 9.71, 6.34],
[10.11, 9.85, 6.04],
[10.10, 9.90, 6.02],
[10.09, 10.00, 6.12],
[10.09, 10.20, 6.38],
[10.10, 10.11, 6.43],
[10.05, 10.18, 6.46],
[10.07, 10.21, 6.43],
[10.09, 10.26, 6.27],
[10.10, 10.38, 5.77],
[10.00, 10.47, 6.01],
[10.04, 10.42, 5.67],
[10.04, 10.07, 5.67],
[10.06, 10.24, 6.35],
[10.09, 10.27, 6.32],
[10.05, 10.38, 6.43],
[9.97, 10.43, 6.36],
[9.96, 10.39, 5.79],
[9.86, 10.65, 5.47],
[9.77, 10.84, 5.65],
[9.92, 10.65, 6.04],
[9.94, 10.73, 6.14],
[9.97, 10.63, 6.23],
[9.97, 10.51, 5.83],
[9.92, 10.35, 6.25],
[9.92, 10.46, 6.27],
[9.92, 10.63, 7.12],
[9.93, 10.74, 7.82],
[9.64, 10.76, 8.14],
[9.58, 10.54, 8.27],
[9.60, 11.02, 8.92],
[9.58, 11.12, 8.76],
[9.62, 11.17, 9.15],
[9.62, np.nan, 8.90],
[9.64, np.nan, 9.01],
[9.59, 10.92, 9.16],
[9.62, 11.15, 9.00],
[9.63, 11.11, 8.27],
[9.70, 11.55, 7.99],
[9.66, 11.95, 8.33],
[9.64, 11.93, 8.25]])
self.multi_prices_close = np.array([[10.04, 9.68, 6.64],
[10.00, 9.87, 7.26],
[10.00, 9.86, 7.03],
[9.99, 9.87, 6.87],
[9.97, 9.79, np.nan],
[9.99, 9.82, 6.64],
[10.03, 9.80, 6.85],
[10.03, 9.66, 6.70],
[10.06, 9.62, 6.39],
[10.06, 9.58, 6.22],
[10.11, 9.69, 5.92],
[10.09, 9.78, 5.91],
[10.07, 9.75, 6.11],
[10.06, 9.96, 5.91],
[10.09, 9.90, 6.23],
[10.03, 10.04, 6.28],
[10.03, 10.06, 6.28],
[10.06, 10.08, 6.27],
[10.08, 10.24, 5.70],
[10.00, 10.24, 5.56],
[9.99, 10.24, 5.67],
[10.03, 9.86, 5.16],
[10.03, 10.13, 5.69],
[10.06, 10.12, 6.32],
[10.03, 10.10, 6.14],
[9.97, 10.25, 6.25],
[9.94, 10.24, 5.79],
[9.83, 10.22, 5.26],
[9.77, 10.75, 5.05],
[9.84, 10.64, 5.45],
[9.91, 10.56, 6.06],
[9.93, 10.60, 6.21],
[9.96, 10.42, 5.69],
[9.91, 10.25, 5.46],
[9.91, 10.24, 6.02],
[9.88, 10.49, 6.69],
[9.91, 10.57, 7.43],
[9.64, 10.63, 7.72],
[9.56, 10.48, 8.16],
[9.57, 10.37, 7.83],
[9.55, 10.96, 8.70],
[9.57, 11.02, 8.71],
[9.61, np.nan, 8.88],
[9.61, np.nan, 8.54],
[9.55, 10.88, 8.87],
[9.57, 10.87, 8.87],
[9.63, 11.01, 8.18],
[9.64, 11.01, 7.80],
[9.65, 11.58, 7.97],
[9.62, 11.80, 8.25]])
# The trading signals come in three groups, applied to the open, high and close prices respectively
# The key point here is handling the stock delivery period: when it is not 0, delivery happens in whole trading days
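# Note: with a non-zero delivery period, shares bought (or cash received from a sale) only become
# available after the given number of trading days; the expected results pt_res_sb20 and pt_res_bs21
# further below exercise exactly this behaviour.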
self.multi_signals = []
# The first group of multi_signals trades at the open price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.000, 0.000],
[0.000, -0.500, 0.000],
[0.000, -0.500, 0.000],
[0.000, 0.000, 0.000],
[0.150, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.300, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.300],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.350, 0.250],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.100, 0.000, 0.350],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.050, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The second group of signals trades at the high price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.150, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, -0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.200],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The third group of signals trades at the close price
self.multi_signals.append(
pd.DataFrame(np.array([[0.000, 0.200, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.500, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -0.800],
[0.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-0.750, 0.000, 0.000],
[0.000, 0.000, -0.850],
[0.000, 0.000, 0.000],
[0.000, -0.700, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, -1.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[-1.000, 0.000, 0.000],
[0.000, -1.000, 0.000],
[0.000, 0.000, 0.000]]),
columns=self.multi_shares,
index=self.multi_dates
)
)
# The backtest also needs three groups of prices: open, high and close
self.multi_histories = []
# The first price history holds the open prices, matching the open-price signal group
self.multi_histories.append(
pd.DataFrame(self.multi_prices_open,
columns=self.multi_shares,
index=self.multi_dates
)
)
# The second price history holds the high prices, matching the high-price signal group
self.multi_histories.append(
pd.DataFrame(self.multi_prices_high,
columns=self.multi_shares,
index=self.multi_dates
)
)
# The third price history holds the close prices, matching the close-price signal group
self.multi_histories.append(
pd.DataFrame(self.multi_prices_close,
columns=self.multi_shares,
index=self.multi_dates
)
)
# Set up the backtest parameters
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
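# Two cost schemes: self.rate is completely friction-free, while self.rate2 keeps zero rates
# but adds minimum per-transaction fees (buy_min=10, sell_min=5).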
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
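# Wrap the single-price signal DataFrames and the price DataFrame into HistoryPanel objects
# (htype 'close') so they can be consumed by the backtest loop.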
self.pt_signal_hp = dataframe_to_hp(
pd.DataFrame(self.pt_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.ps_signal_hp = dataframe_to_hp(
pd.DataFrame(self.ps_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.vs_signal_hp = dataframe_to_hp(
pd.DataFrame(self.vs_signals, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_signal_hp = stack_dataframes(
self.multi_signals,
stack_along='htypes',
htypes='open, high, close'
)
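# The three multi-price signal frames are stacked along the htypes axis into one panel with
# one htype per price type ('open, high, close'); the price histories below are stacked the same way.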
self.history_list = dataframe_to_hp(
pd.DataFrame(self.prices, index=self.dates, columns=self.shares),
htypes='close'
)
self.multi_history_list = stack_dataframes(
self.multi_histories,
stack_along='htypes',
htypes='open, high, close'
)
# Expected backtest results for the simulated PT signals
# PT signals, sell before buy, delivery period 0
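# Assumed row layout of the expected-result arrays: holdings of the seven shares, then cash,
# a fee column (all zeros here) and the total portfolio value on that trading day.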
self.pt_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21979.4972],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21880.9628],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21630.0454],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20968.0007],
[1216.3282, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21729.9339],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21107.6400],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21561.1745],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21553.0916],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22316.9366],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22084.2862],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 21777.3543],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22756.8225],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22843.4697],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 0.0000, 2172.0393, 0.0000, 22762.1766],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22257.0973],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23136.5259],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 21813.7852],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22395.3204],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 23717.6858],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 1607.1030, 1448.0262, 0.0000, 0.0000, 22715.4263],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 22498.3254],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23341.1733],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24162.3941],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24847.1508],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 23515.9755],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24555.8997],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24390.6372],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24073.3309],
[1216.3282, 417.9188, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 2455.7405, 0.0000, 24394.6500],
[2076.3314, 903.0334, 511.8829, 288.6672, 0.0000, 669.7975, 1448.0262, 3487.5655, 0.0000, 34904.8150],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 34198.4475],
[0.0000, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 4608.8037, 0.0000, 33753.0190],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 34953.8178],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 33230.2498],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35026.7819],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36976.2649],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38673.8147],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 38717.3429],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 36659.0854],
[644.7274, 903.0334, 511.8829, 897.4061, 0.0000, 3514.8404, 1448.0262, 379.3918, 0.0000, 35877.9607],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36874.4840],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37010.2695],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 38062.3510],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36471.1357],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37534.9927],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 37520.2569],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36747.7952],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36387.9409],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 35925.9715],
[644.7274, 1337.8498, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 2853.5665, 0.0000, 36950.7028],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37383.2463],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 37761.2724],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 39548.2653],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41435.1291],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41651.6261],
[644.7274, 1657.3981, 1071.9327, 0.0000, 1229.1495, 0.0000, 1448.0262, 0.0000, 0.0000, 41131.9920],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 41286.4702],
[644.7274, 1657.3981, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 0.0000, 0.0000, 40978.7259],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 40334.5453],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41387.9172],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42492.6707],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42953.7188],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42005.1092],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 42017.9106],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 43750.2824],
[644.7274, 0.0000, 1071.9327, 0.0000, 0.0000, 0.0000, 3760.7116, 17485.5497, 0.0000, 41766.8679],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 42959.1150],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 41337.9320],
[0.0000, 0.0000, 2461.8404, 0.0000, 0.0000, 0.0000, 3760.7116, 12161.6930, 0.0000, 40290.3688]])
# PT signals, buy before sell, delivery period 0
self.pt_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 6035.8333, 0.0000, 9761.1111],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9674.8209],
[348.0151, 417.9188, 0.0000, 0.0000, 555.5556, 0.0000, 321.0892, 2165.9050, 0.0000, 9712.5872],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9910.7240],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9919.3782],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9793.0692],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9513.8217],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9123.5935],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9000.5995],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9053.4865],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9248.7142],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9161.1372],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9197.3369],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9504.6981],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 9875.2461],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10241.5400],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10449.2398],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10628.3269],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 321.0892, 3762.5512, 0.0000, 10500.7893],
[348.0151, 417.9188, 0.0000, 0.0000, 154.3882, 0.0000, 0.0000, 5233.1396, 0.0000, 10449.2776],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10338.2857],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10194.3474],
[348.0151, 417.9188, 0.0000, 0.0000, 459.8694, 0.0000, 0.0000, 3433.8551, 0.0000, 10471.0008],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10411.2629],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10670.0618],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10652.4799],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10526.1488],
[101.4983, 417.9188, 0.0000, 288.6672, 459.8694, 0.0000, 0.0000, 3541.0848, 0.0000, 10458.6614],
[101.4983, 417.9188, 821.7315, 288.6672, 0.0000, 2576.1284, 0.0000, 4487.0722, 0.0000, 20609.0270],
[797.1684, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 2703.5808, 0.0000, 21979.4972],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21700.7241],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21446.6630],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 20795.3593],
[1190.1307, 417.9188, 821.7315, 288.6672, 0.0000, 1607.1030, 0.0000, 0.0000, 0.0000, 21557.2924],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 20933.6887],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21392.5581],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21390.2918],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22147.7562],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21910.9053],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 21594.2980],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22575.4380],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22655.8312],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 0.0000, 2201.6110, 0.0000, 22578.4365],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22073.2661],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22955.2367],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 21628.1647],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 22203.4237],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 1607.1030, 1467.7407, 0.0000, 0.0000, 23516.2598],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22505.8428],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 22199.1042],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23027.9302],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23848.5806],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24540.8871],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23205.6838],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24267.6685],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24115.3796],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 23814.3667],
[1190.1307, 417.9188, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 2278.3728, 0.0000, 24133.6611],
[2061.6837, 896.6628, 507.6643, 288.6672, 0.0000, 699.3848, 1467.7407, 3285.8830, 0.0000, 34658.5742],
[0.0000, 896.6628, 507.6643, 466.6033, 0.0000, 1523.7106, 1467.7407, 12328.8684, 0.0000, 33950.7917],
[0.0000, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 4380.3797, 0.0000, 33711.4045],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 34922.0959],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 33237.1081],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35031.8071],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36976.3376],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38658.5245],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 38712.2854],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 36655.3125],
[644.1423, 896.6628, 507.6643, 936.6623, 0.0000, 3464.7832, 1467.7407, 154.8061, 0.0000, 35904.3692],
[644.1423, 902.2617, 514.8253, 0.0000, 15.5990, 0.0000, 1467.7407, 14821.9004, 0.0000, 36873.9080],
[644.1423, 902.2617, 514.8253, 0.0000, 1220.8683, 0.0000, 1467.7407, 10470.8781, 0.0000, 36727.7895],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37719.9840],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36138.1277],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37204.0760],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 37173.1201],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36398.2298],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36034.2178],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 35583.6399],
[644.1423, 1338.1812, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 2753.1120, 0.0000, 36599.2645],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37013.3408],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 37367.7449],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 39143.8273],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41007.3074],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 41225.4657],
[644.1423, 1646.4805, 1033.4242, 0.0000, 1220.8683, 0.0000, 1467.7407, 0.0000, 0.0000, 40685.9525],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 1467.7407, 6592.6891, 0.0000, 40851.5435],
[644.1423, 1646.4805, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 0.0000, 0.0000, 41082.1210],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 40385.0135],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 41455.1513],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42670.6769],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 43213.7233],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42205.2480],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42273.9386],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 44100.0777],
[644.1423, 0.0000, 1033.4242, 0.0000, 0.0000, 0.0000, 3974.4666, 17370.3689, 0.0000, 42059.7208],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 43344.9653],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 41621.0324],
[0.0000, 0.0000, 2483.9522, 0.0000, 0.0000, 0.0000, 3974.4666, 11619.4102, 0.0000, 40528.0648]])
# PT signals, sell before buy, delivery period of 2 days for stocks and 0 days for cash,
# so that the cash freed by selling first can be used for the subsequent buys
self.pt_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21584.441],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21309.576],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20664.323],
[1156.912, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21445.597],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 20806.458],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21288.441],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21294.365],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 22058.784],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21805.540],
[1156.912, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 2223.240, 0.000, 21456.333],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22459.720],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22611.602],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22470.912],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21932.634],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22425.864],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21460.103],
[1481.947, 417.919, 504.579, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22376.968],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23604.295],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22704.826],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 22286.293],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23204.755],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24089.017],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24768.185],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23265.196],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24350.540],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24112.706],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 23709.076],
[1481.947, 417.919, 504.579, 288.667, 0.000, 763.410, 1577.904, 0.000, 0.000, 24093.545],
[2060.275, 896.050, 504.579, 288.667, 0.000, 763.410, 1577.904, 2835.944, 0.000, 34634.888],
[578.327, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 732.036, 0.000, 33912.261],
[0.000, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 4415.981, 0.000, 33711.951],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 34951.433],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 33224.596],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35065.209],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 37018.699],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38706.035],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 38724.569],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 36647.268],
[644.683, 896.050, 504.579, 889.896, 0.000, 3485.427, 1577.904, 186.858, 0.000, 35928.930],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36967.229],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37056.598],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 38129.862],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36489.333],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37599.602],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 37566.823],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36799.280],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36431.196],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 35940.942],
[644.683, 1341.215, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 2367.759, 0.000, 36973.050],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37393.292],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 37711.276],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 39515.991],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41404.440],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41573.523],
[644.683, 1606.361, 1074.629, 0.000, 1232.241, 0.000, 1577.904, 0.000, 0.000, 41011.613],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 41160.181],
[644.683, 1606.361, 1074.629, 0.000, 0.000, 0.000, 3896.406, 0.000, 0.000, 40815.512],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 40145.531],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41217.281],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42379.061],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 42879.589],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41891.452],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41929.003],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 43718.052],
[644.683, 0.000, 1074.629, 0.000, 0.000, 0.000, 3896.406, 16947.110, 0.000, 41685.916],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 42930.410],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 41242.589],
[0.000, 0.000, 2460.195, 0.000, 0.000, 0.000, 3896.406, 11653.255, 0.000, 40168.084]])
# PT signals, buy before sell, delivery period of 2 days for stocks and 1 day for cash
self.pt_res_bs21 = np.array([
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 321.089, 6035.833, 0.000, 9761.111],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9674.821],
[348.015, 417.919, 0.000, 0.000, 555.556, 0.000, 321.089, 2165.905, 0.000, 9712.587],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9910.724],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9919.378],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9793.069],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9513.822],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9123.593],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9000.600],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9053.487],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9248.714],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9161.137],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9197.337],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9504.698],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 9875.246],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10241.540],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10449.240],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10628.327],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 321.089, 3762.551, 0.000, 10500.789],
[348.015, 417.919, 0.000, 0.000, 154.388, 0.000, 0.000, 5233.140, 0.000, 10449.278],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10338.286],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10194.347],
[348.015, 417.919, 0.000, 0.000, 459.869, 0.000, 0.000, 3433.855, 0.000, 10471.001],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10411.263],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10670.062],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10652.480],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10526.149],
[101.498, 417.919, 0.000, 288.667, 459.869, 0.000, 0.000, 3541.085, 0.000, 10458.661],
[101.498, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 4487.072, 0.000, 20609.027],
[797.168, 417.919, 821.732, 288.667, 0.000, 2576.128, 0.000, 0.000, 0.000, 21979.497],
[797.168, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 2475.037, 0.000, 21584.441],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21266.406],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 20623.683],
[1150.745, 417.919, 821.732, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21404.957],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 20765.509],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21248.748],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21256.041],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 22018.958],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21764.725],
[1150.745, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 2230.202, 0.000, 21413.241],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22417.021],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22567.685],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22427.699],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21889.359],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22381.938],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 21416.358],
[1476.798, 417.919, 503.586, 288.667, 0.000, 1649.148, 0.000, 0.000, 0.000, 22332.786],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 0.000, 2386.698, 0.000, 23557.595],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 23336.992],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 22907.742],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24059.201],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24941.902],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25817.514],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 24127.939],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25459.688],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25147.370],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 2209.906, 0.000, 0.000, 25005.842],
[1476.798, 417.919, 503.586, 288.667, 0.000, 761.900, 1086.639, 2752.004, 0.000, 25598.700],
[2138.154, 929.921, 503.586, 288.667, 0.000, 761.900, 1086.639, 4818.835, 0.000, 35944.098],
[661.356, 929.921, 503.586, 553.843, 0.000, 1954.237, 1086.639, 8831.252, 0.000, 35237.243],
[0.000, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 9460.955, 0.000, 35154.442],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36166.632],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 34293.883],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 35976.901],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37848.552],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39512.574],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 39538.024],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 37652.984],
[667.098, 929.921, 503.586, 553.843, 0.000, 3613.095, 1086.639, 5084.792, 0.000, 36687.909],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37749.277],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37865.518],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38481.190],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37425.087],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38051.341],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 38065.478],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37429.495],
[667.098, 1108.871, 745.260, 0.000, 512.148, 0.000, 1086.639, 11861.593, 0.000, 37154.479],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 36692.717],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37327.055],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 37937.630],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 38298.645],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 39689.369],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40992.397],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 41092.265],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 1086.639, 7576.628, 0.000, 40733.622],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40708.515],
[667.098, 1600.830, 745.260, 0.000, 512.148, 0.000, 3726.579, 0.000, 0.000, 40485.321],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 39768.059],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 40519.595],
[667.098, 0.000, 745.260, 0.000, 512.148, 0.000, 3726.579, 16888.760, 0.000, 41590.937],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42354.983],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41175.149],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 41037.902],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 42706.213],
[667.098, 0.000, 1283.484, 0.000, 512.148, 0.000, 3726.579, 12448.413, 0.000, 40539.205],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 41608.692],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39992.148],
[0.000, 0.000, 2384.452, 0.000, 512.148, 0.000, 3726.579, 9293.252, 0.000, 39134.828]])
# Expected backtest results for the simulated PS signals
# PS signals, sell before buy, delivery period 0
self.ps_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22027.4535],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 20939.9992],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21250.0636],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22282.7812],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21407.0658],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21160.2373],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 21826.7682],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22744.9403],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23466.1185],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22017.8821],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23191.4662],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 23099.0822],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22684.7671],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 1339.2073, 0.0000, 0.0000, 22842.1346],
[1073.8232, 416.6787, 735.6442, 269.8496, 1785.2055, 938.6967, 1339.2073, 5001.4246, 0.0000, 33323.8359],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32820.2901],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 32891.2308],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34776.5296],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 33909.0325],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 34560.1906],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 36080.4552],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38618.4454],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 38497.9230],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 37110.0991],
[0.0000, 416.6787, 735.6442, 944.9611, 1785.2055, 3582.8836, 1339.2073, 0.0000, 0.0000, 35455.2467],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35646.1860],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35472.3020],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36636.4694],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35191.7035],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36344.2242],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36221.6005],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35943.5708],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35708.2608],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 35589.0286],
[0.0000, 416.6787, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 15126.2788, 0.0000, 36661.0285],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36310.5909],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 36466.7637],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 37784.4918],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39587.6766],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 40064.0191],
[0.0000, 823.2923, 735.6442, 0.0000, 1785.2055, 0.0000, 1339.2073, 11495.2197, 0.0000, 39521.6439],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39932.2761],
[0.0000, 823.2923, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 17142.1018, 0.0000, 39565.2475],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 38943.1632],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39504.1184],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40317.8004],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40798.5768],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39962.5711],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 40194.4793],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 41260.4003],
[0.0000, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 2730.5758, 25827.8351, 0.0000, 39966.3024],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 40847.3160],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 39654.5445],
[0.0000, 0.0000, 1613.4518, 0.0000, 0.0000, 0.0000, 2730.5758, 19700.7377, 0.0000, 38914.8151]])
# PS signals, buy before sell, delivery period 0
self.ps_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 0.0000, 0.0000, 7500.0000, 0.0000, 9916.6667],
[0.0000, 0.0000, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 5059.7222, 0.0000, 9761.1111],
[346.9824, 416.6787, 0.0000, 0.0000, 555.5556, 205.0654, 321.0892, 1201.2775, 0.0000, 9646.1118],
[346.9824, 416.6787, 191.0372, 0.0000, 555.5556, 205.0654, 321.0892, 232.7189, 0.0000, 9685.5858],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9813.2184],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9803.1288],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9608.0198],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9311.5727],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8883.6246],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8751.3900],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 8794.1811],
[346.9824, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 1891.0523, 0.0000, 9136.5704],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9209.3588],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9093.8294],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9387.5537],
[231.4373, 416.6787, 191.0372, 0.0000, 138.8889, 205.0654, 321.0892, 2472.2444, 0.0000, 9585.9589],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 9928.7771],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10060.3806],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10281.0021],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 321.0892, 3035.8041, 0.0000, 10095.5613],
[231.4373, 416.6787, 95.5186, 0.0000, 138.8889, 205.0654, 0.0000, 4506.3926, 0.0000, 10029.9571],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9875.6133],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9614.9463],
[231.4373, 416.6787, 95.5186, 0.0000, 474.2238, 205.0654, 0.0000, 2531.2699, 0.0000, 9824.1722],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9732.5743],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9968.3391],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 10056.1579],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9921.4925],
[115.7186, 416.6787, 95.5186, 269.8496, 474.2238, 205.0654, 0.0000, 1854.7990, 0.0000, 9894.1621],
[115.7186, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 6179.7742, 0.0000, 20067.9370],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21133.5080],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20988.8485],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20596.7429],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 19910.7730],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20776.7070],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20051.7969],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20725.3884],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20828.8795],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21647.1811],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21310.1687],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 20852.0993],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21912.3952],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21937.8282],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 1877.3934, 0.0000, 0.0000, 0.0000, 21962.4576],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21389.4018],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21625.6913],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 20873.0389],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 21450.9447],
[1073.8232, 416.6787, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 2008.8110, 0.0000, 22269.3892],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21969.5329],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21752.6924],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22000.6088],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23072.5655],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23487.5201],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22441.0460],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23201.2700],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 23400.9485],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 22306.2008],
[1073.8232, 737.0632, 735.6442, 269.8496, 0.0000, 938.6967, 0.0000, 0.0000, 0.0000, 21989.5913],
[1073.8232, 737.0632, 735.6442, 269.8496, 1708.7766, 938.6967, 0.0000, 5215.4255, 0.0000, 31897.1636],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31509.5059],
[0.0000, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 6421.4626, 0.0000, 31451.7888],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32773.4592],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32287.0318],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 32698.1938],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34031.5183],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 35537.8336],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36212.6487],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 36007.5294],
[978.8815, 737.0632, 735.6442, 578.0898, 1708.7766, 2145.9711, 0.0000, 0.0000, 0.0000, 34691.3797],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33904.8810],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34341.6098],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 35479.9505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34418.4455],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34726.7182],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34935.0407],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 34136.7505],
[978.8815, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 9162.7865, 0.0000, 33804.1575],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 33653.8970],
[195.7763, 737.0632, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 14025.8697, 0.0000, 34689.8757],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 34635.7841],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 35253.2755],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 36388.1051],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 37987.4204],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38762.2103],
[195.7763, 1124.9219, 735.6442, 0.0000, 1708.7766, 0.0000, 0.0000, 10562.2913, 0.0000, 38574.0544],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39101.9156],
[195.7763, 1124.9219, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 15879.4935, 0.0000, 39132.5587],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38873.2941],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39336.6594],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39565.9568],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39583.4317],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39206.8350],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39092.6551],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 39666.1834],
[195.7763, 0.0000, 735.6442, 0.0000, 0.0000, 0.0000, 1362.4361, 27747.4200, 0.0000, 38798.0749],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 39143.5561],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38617.8779],
[0.0000, 0.0000, 1576.8381, 0.0000, 0.0000, 0.0000, 1362.4361, 23205.2077, 0.0000, 38156.1701]])
# PS signal, sell first then buy, delivery period: 2 days (stocks), 1 day (cash)
self.ps_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0.000, 33323.836],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 32891.231],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34776.530],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 33909.032],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 34560.191],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 36080.455],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38618.445],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 38497.923],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 37110.099],
[0.000, 416.679, 735.644, 944.961, 1785.205, 3582.884, 1339.207, 0.000, 0.000, 35455.247],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35646.186],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35472.302],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36636.469],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35191.704],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36344.224],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36221.601],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35943.571],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35708.261],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 35589.029],
[0.000, 416.679, 735.644, 0.000, 1785.205, 0.000, 1339.207, 15126.279, 0.000, 36661.029],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36310.591],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 36466.764],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 37784.492],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39587.677],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 40064.019],
[0.000, 823.292, 735.644, 0.000, 1785.205, 0.000, 1339.207, 11495.220, 0.000, 39521.644],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39932.276],
[0.000, 823.292, 735.644, 0.000, 0.000, 0.000, 2730.576, 17142.102, 0.000, 39565.248],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 38943.163],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39504.118],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40317.800],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40798.577],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39962.571],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 40194.479],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 41260.400],
[0.000, 0.000, 735.644, 0.000, 0.000, 0.000, 2730.576, 25827.835, 0.000, 39966.302],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 40847.316],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 39654.544],
[0.000, 0.000, 1613.452, 0.000, 0.000, 0.000, 2730.576, 19700.738, 0.000, 38914.815]])
# PS signal, buy first then sell, delivery period: 2 days (stocks), 1 day (cash)
self.ps_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 208.333, 326.206, 5020.833, 0.000, 9761.111],
[351.119, 421.646, 0.000, 0.000, 555.556, 208.333, 326.206, 1116.389, 0.000, 9645.961],
[351.119, 421.646, 190.256, 0.000, 555.556, 208.333, 326.206, 151.793, 0.000, 9686.841],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9813.932],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9803.000],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9605.334],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9304.001],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8870.741],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8738.282],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 8780.664],
[351.119, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 1810.126, 0.000, 9126.199],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9199.746],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9083.518],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9380.932],
[234.196, 421.646, 190.256, 0.000, 138.889, 208.333, 326.206, 2398.247, 0.000, 9581.266],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 9927.154],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10059.283],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10281.669],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 326.206, 2959.501, 0.000, 10093.263],
[234.196, 421.646, 95.128, 0.000, 138.889, 208.333, 0.000, 4453.525, 0.000, 10026.289],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9870.523],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9606.437],
[234.196, 421.646, 95.128, 0.000, 479.340, 208.333, 0.000, 2448.268, 0.000, 9818.691],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9726.556],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9964.547],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 10053.449],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9917.440],
[117.098, 421.646, 95.128, 272.237, 479.340, 208.333, 0.000, 1768.219, 0.000, 9889.495],
[117.098, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 6189.948, 0.000, 20064.523],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21124.484],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20827.077],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20396.124],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19856.445],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20714.156],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 19971.485],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20733.948],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20938.903],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21660.772],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 21265.298],
[708.171, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 2377.527, 0.000, 20684.378],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21754.770],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21775.215],
[1055.763, 421.646, 729.561, 272.237, 0.000, 1865.791, 0.000, 0.000, 0.000, 21801.488],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21235.427],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21466.714],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 20717.431],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 21294.450],
[1055.763, 421.646, 729.561, 272.237, 0.000, 932.896, 0.000, 1996.397, 0.000, 22100.247],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21802.552],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21593.608],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21840.028],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22907.725],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23325.945],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22291.942],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23053.050],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 23260.084],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 22176.244],
[1055.763, 740.051, 729.561, 272.237, 0.000, 932.896, 0.000, 0.000, 0.000, 21859.297],
[1055.763, 740.051, 729.561, 272.237, 1706.748, 932.896, 0.000, 5221.105, 0.000, 31769.617],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31389.961],
[0.000, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 6313.462, 0.000, 31327.498],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32647.140],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32170.095],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 32577.742],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 33905.444],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35414.492],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 36082.120],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 35872.293],
[962.418, 740.051, 729.561, 580.813, 1706.748, 2141.485, 0.000, 0.000, 0.000, 34558.132],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33778.138],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34213.578],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 35345.791],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34288.014],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34604.406],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34806.850],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 34012.232],
[962.418, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 9177.053, 0.000, 33681.345],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 33540.463],
[192.484, 740.051, 729.561, 0.000, 1706.748, 0.000, 0.000, 13958.345, 0.000, 34574.280],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 34516.781],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 35134.412],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 36266.530],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 37864.376],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38642.633],
[192.484, 1127.221, 729.561, 0.000, 1706.748, 0.000, 0.000, 10500.917, 0.000, 38454.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 38982.227],
[192.484, 1127.221, 729.561, 0.000, 0.000, 0.000, 1339.869, 15871.934, 0.000, 39016.154],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38759.803],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39217.182],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39439.690],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39454.081],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39083.341],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38968.694],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 39532.030],
[192.484, 0.000, 729.561, 0.000, 0.000, 0.000, 1339.869, 27764.114, 0.000, 38675.507],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 39013.741],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38497.668],
[0.000, 0.000, 1560.697, 0.000, 0.000, 0.000, 1339.869, 23269.751, 0.000, 38042.410]])
# Simulated backtest results for VS signals
# VS signal, sell first then buy, delivery period: 0 days
self.vs_res_sb00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 10000.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750.0000, 0.0000, 9925.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954.0000, 0.0000, 9785.0000],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878.0000, 0.0000, 9666.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0.0000, 0.0000, 9731.0000],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9830.9270],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9785.8540],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9614.3412],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9303.1953],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8834.4398],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8712.7554],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 8717.9507],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592.0000, 0.0000, 9079.1479],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9166.0276],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9023.6607],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9291.6864],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598.0000, 0.0000, 9411.6371],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20137.8405],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20711.3567],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21470.3891],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21902.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 20962.9538],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21833.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21941.8169],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21278.5184],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0.0000, 0.0000, 21224.4700],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160.0000, 0.0000, 31225.2119],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30894.5748],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488.0000, 0.0000, 30764.3811],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 31615.4215],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 32486.1394],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 33591.2847],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34056.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34756.4863],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34445.5428],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208.0000, 0.0000, 34433.9541],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33870.4703],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34014.3010],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34680.5671],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33890.9945],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34004.6640],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 34127.7768],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346.0000, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 32613.3171],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830.0000, 0.0000, 33168.1558],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33504.6236],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 33652.1318],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35557.5191],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35669.7128],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151.0000, 0.0000, 35211.4466],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35550.6079],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530.0000, 0.0000, 35711.6563],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35682.6079],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35880.8336],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36249.8740],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36071.6159],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35846.1562],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35773.3578],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 36274.9465],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695.0000, 0.0000, 35739.3094],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 36135.0917],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35286.5835],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167.0000, 0.0000, 35081.3658]])
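# A minimal sketch of how these fixtures would typically be consumed by a test method
# (names below are hypothetical; the actual backtest call is defined elsewhere in the suite):
#     res = run_loop_backtest(signal_type='VS', buy_first=False, delivery_period=0)
#     np.testing.assert_allclose(res, self.vs_res_sb00, rtol=1e-3)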
# VS signal, buy first then sell, delivery period: 0 days
self.vs_res_bs00 = np.array(
[[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 10000],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 0.0000, 0.0000, 7750, 0.0000, 9925],
[0.0000, 0.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 4954, 0.0000, 9785],
[400.0000, 400.0000, 0.0000, 0.0000, 500.0000, 300.0000, 300.0000, 878, 0.0000, 9666],
[400.0000, 400.0000, 173.1755, 0.0000, 500.0000, 300.0000, 300.0000, 0, 0.0000, 9731],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9830.927022],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9785.854043],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9614.341223],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9303.195266],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8834.439842],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8712.755424],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 8717.95069],
[400.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 1592, 0.0000, 9079.147929],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9166.027613],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9023.66075],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9291.686391],
[200.0000, 400.0000, 173.1755, 0.0000, 100.0000, 300.0000, 300.0000, 2598, 0.0000, 9411.637081],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9706.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9822.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9986.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 300.0000, 3619.7357, 0.0000, 9805.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 100.0000, 300.0000, 0.0000, 4993.7357, 0.0000, 9704.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9567.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9209.7357],
[200.0000, 400.0000, 0.0000, 0.0000, 600.0000, 300.0000, 0.0000, 2048.7357, 0.0000, 9407.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9329.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9545.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9652.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9414.7357],
[0.0000, 400.0000, 0.0000, 300.0000, 600.0000, 300.0000, 0.0000, 1779.7357, 0.0000, 9367.7357],
[0.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 9319.7357, 0.0000, 19556.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20094.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19849.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19802.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19487.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19749.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19392.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19671.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19756.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 20111.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19867.7357],
[500.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 6094.7357, 0.0000, 19775.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20314.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20310.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 900.0000, 0.0000, 1990.7357, 0.0000, 20253.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20044.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20495.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 19798.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20103.7357],
[1100.0000, 400.0000, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 1946.7357, 0.0000, 20864.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20425.7357],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20137.84054],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20711.35674],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21470.38914],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21902.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 20962.95375],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21833.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21941.81688],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21278.51837],
[1100.0000, 710.4842, 400.0000, 300.0000, 300.0000, 500.0000, 600.0000, 0, 0.0000, 21224.46995],
[1100.0000, 710.4842, 400.0000, 300.0000, 600.0000, 500.0000, 600.0000, 9160, 0.0000, 31225.21185],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30894.57479],
[600.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 7488, 0.0000, 30764.38113],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31815.5828],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 31615.42154],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 32486.13941],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 33591.28466],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34056.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34756.48633],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34445.54276],
[1100.0000, 710.4842, 400.0000, 800.0000, 600.0000, 700.0000, 600.0000, 4208, 0.0000, 34433.95412],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33870.47032],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34014.30104],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34680.56715],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33890.99452],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34004.66398],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 34127.77683],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33421.1638],
[1100.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11346, 0.0000, 33120.9057],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 32613.31706],
[700.0000, 710.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 13830, 0.0000, 33168.15579],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33504.62357],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 33652.13176],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 34680.4867],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35557.51909],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35669.71276],
[700.0000, 1010.4842, 400.0000, 100.0000, 600.0000, 100.0000, 600.0000, 11151, 0.0000, 35211.44665],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35550.60792],
[700.0000, 1010.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13530, 0.0000, 35711.65633],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35682.60792],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35880.83362],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36249.87403],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36071.61593],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35846.15615],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35773.35783],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 36274.94647],
[700.0000, 710.4842, 400.0000, 100.0000, 0.0000, 100.0000, 900.0000, 16695, 0.0000, 35739.30941],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 36135.09172],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35286.58353],
[500.0000, 710.4842, 1100.0000, 100.0000, 0.0000, 100.0000, 900.0000, 13167, 0.0000, 35081.36584]])
# VS signal, sell first then buy, delivery period: 2 days (stocks), 1 day (cash)
self.vs_res_sb20 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
# VS signal, buy first then sell, delivery period: 2 days (stocks), 1 day (cash)
self.vs_res_bs21 = np.array(
[[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 0.000, 0.000, 7750.000, 0.000, 9925.000],
[0.000, 0.000, 0.000, 0.000, 500.000, 300.000, 300.000, 4954.000, 0.000, 9785.000],
[400.000, 400.000, 0.000, 0.000, 500.000, 300.000, 300.000, 878.000, 0.000, 9666.000],
[400.000, 400.000, 173.176, 0.000, 500.000, 300.000, 300.000, 0.000, 0.000, 9731.000],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9830.927],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9785.854],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9614.341],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9303.195],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8834.440],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8712.755],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 8717.951],
[400.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 1592.000, 0.000, 9079.148],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9166.028],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9023.661],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9291.686],
[200.000, 400.000, 173.176, 0.000, 100.000, 300.000, 300.000, 2598.000, 0.000, 9411.637],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9706.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9822.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9986.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 300.000, 3619.736, 0.000, 9805.736],
[200.000, 400.000, 0.000, 0.000, 100.000, 300.000, 0.000, 4993.736, 0.000, 9704.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9567.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9209.736],
[200.000, 400.000, 0.000, 0.000, 600.000, 300.000, 0.000, 2048.736, 0.000, 9407.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9329.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9545.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9652.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9414.736],
[0.000, 400.000, 0.000, 300.000, 600.000, 300.000, 0.000, 1779.736, 0.000, 9367.736],
[0.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 9319.736, 0.000, 19556.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20094.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19849.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19802.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19487.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19749.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19392.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19671.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19756.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 20111.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19867.736],
[500.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 6094.736, 0.000, 19775.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20314.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20310.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 900.000, 0.000, 1990.736, 0.000, 20253.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20044.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20495.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 19798.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20103.736],
[1100.000, 400.000, 400.000, 300.000, 300.000, 500.000, 600.000, 1946.736, 0.000, 20864.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20425.736],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20137.841],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20711.357],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21470.389],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21902.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 20962.954],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21833.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21941.817],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21278.518],
[1100.000, 710.484, 400.000, 300.000, 300.000, 500.000, 600.000, 0.000, 0.000, 21224.470],
[1100.000, 710.484, 400.000, 300.000, 600.000, 500.000, 600.000, 9160.000, 0.000, 31225.212],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30894.575],
[600.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 7488.000, 0.000, 30764.381],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31815.583],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 31615.422],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 32486.139],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 33591.285],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34056.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34756.486],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34445.543],
[1100.000, 710.484, 400.000, 800.000, 600.000, 700.000, 600.000, 4208.000, 0.000, 34433.954],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33870.470],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34014.301],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34680.567],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33890.995],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34004.664],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 34127.777],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33421.164],
[1100.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11346.000, 0.000, 33120.906],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 32613.317],
[700.000, 710.484, 400.000, 100.000, 600.000, 100.000, 600.000, 13830.000, 0.000, 33168.156],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33504.624],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 33652.132],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 34680.487],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35557.519],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35669.713],
[700.000, 1010.484, 400.000, 100.000, 600.000, 100.000, 600.000, 11151.000, 0.000, 35211.447],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35550.608],
[700.000, 1010.484, 400.000, 100.000, 0.000, 100.000, 900.000, 13530.000, 0.000, 35711.656],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35682.608],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35880.834],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36249.874],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36071.616],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35846.156],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35773.358],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 36274.946],
[700.000, 710.484, 400.000, 100.000, 0.000, 100.000, 900.000, 16695.000, 0.000, 35739.309],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 36135.092],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35286.584],
[500.000, 710.484, 1100.000, 100.000, 0.000, 100.000, 900.000, 13167.000, 0.000, 35081.366]])
# Results of multi-signal processing, sell first then buy, using the cash from sales to buy, delivery period: 2 days (stock), 0 days (cash)
self.multi_res = np.array(
[[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 9965.1867],
[0.0000, 357.2545, 0.0000, 6506.9627, 0.0000, 10033.0650],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10034.8513],
[0.0000, 178.6273, 0.0000, 8273.5864, 0.0000, 10036.6376],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10019.3404],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10027.7062],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10030.1477],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10005.1399],
[150.3516, 178.6273, 0.0000, 6771.5740, 0.0000, 10002.5054],
[150.3516, 489.4532, 0.0000, 3765.8877, 0.0000, 9967.3860],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10044.4059],
[75.1758, 391.5625, 0.0000, 5490.1377, 0.0000, 10078.1430],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10138.2709],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10050.4768],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10300.0711],
[75.1758, 391.5625, 846.3525, 392.3025, 0.0000, 10392.6970],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10400.5282],
[75.1758, 391.5625, 169.2705, 4644.3773, 0.0000, 10408.9220],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10376.5914],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10346.8794],
[75.1758, 0.0000, 169.2705, 8653.9776, 0.0000, 10364.7474],
[75.1758, 381.1856, 645.5014, 2459.1665, 0.0000, 10302.4570],
[18.7939, 381.1856, 645.5014, 3024.6764, 0.0000, 10747.4929],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11150.9107],
[18.7939, 381.1856, 96.8252, 6492.3097, 0.0000, 11125.2946],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11191.9956],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11145.7486],
[18.7939, 114.3557, 96.8252, 9227.3166, 0.0000, 11090.0768],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11113.8733],
[132.5972, 114.3557, 864.3802, 4223.9548, 0.0000, 11456.3281],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21983.7333],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22120.6165],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21654.5327],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21429.6550],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 21912.5643],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 22516.3100],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23169.0777],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23390.8080],
[132.5972, 114.3557, 864.3802, 14223.9548, 0.0000, 23743.3742],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 23210.7311],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24290.4375],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 24335.3279],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18317.3553],
[132.5972, 559.9112, 864.3802, 9367.3999, 0.0000, 18023.4660],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24390.0527],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24389.6421],
[259.4270, 559.9112, 0.0000, 15820.6915, 0.0000, 24483.5953],
[0.0000, 559.9112, 0.0000, 18321.5674, 0.0000, 24486.1895],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389],
[0.0000, 0.0000, 0.0000, 24805.3389, 0.0000, 24805.3389]])
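# Layout of the expected-result arrays (inferred from how the tests below
# index them): in the 10-column arrays, columns 0-6 appear to hold the
# holdings of the 7 assets, column 7 the own cash, column 8 an always-zero
# column (possibly fees), and column 9 the total portfolio value. The
# 6-column multi_res array seems to follow the same scheme for a 3-asset
# pool: holdings in columns 0-2, cash in column 3, a zero column, then
# total value.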
def test_loop_step_pt_sb00(self):
""" test loop step PT-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
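# Judging from how the returned values are combined below, _loop_step
# appears to return (cash gained from sales, cash spent on purchases,
# amounts purchased, amounts sold, transaction fee); the next day's cash
# and holdings are obtained by adding these deltas to the previous state.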
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
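# Sanity check on the numbers above: starting from 10,000 in cash and
# ending with 7,500 implies roughly 2,500 deployed into the fifth asset;
# 2500 / 555.56 suggests an effective unit cost of about 4.5 per share,
# transaction cost included.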
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[2][7],
own_amounts=self.pt_res_sb00[2][0:7],
available_cash=self.pt_res_sb00[2][7],
available_amounts=self.pt_res_sb00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[2][7] + c_g + c_s
amounts = self.pt_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[30][7],
own_amounts=self.pt_res_sb00[30][0:7],
available_cash=self.pt_res_sb00[30][7],
available_amounts=self.pt_res_sb00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[30][7] + c_g + c_s
amounts = self.pt_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[59][7] + 10000,
own_amounts=self.pt_res_sb00[59][0:7],
available_cash=self.pt_res_sb00[59][7] + 10000,
available_amounts=self.pt_res_sb00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_sb00[95][7],
own_amounts=self.pt_res_sb00[95][0:7],
available_cash=self.pt_res_sb00[95][7],
available_amounts=self.pt_res_sb00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_sb00[95][7] + c_g + c_s
amounts = self.pt_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_sb00[97][0:7]))
def test_loop_step_pt_bs00(self):
""" test loop step PT-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.pt_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[2][7],
own_amounts=self.pt_res_bs00[2][0:7],
available_cash=self.pt_res_bs00[2][7],
available_amounts=self.pt_res_bs00[2][0:7],
op=self.pt_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[2][7] + c_g + c_s
amounts = self.pt_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[30][7],
own_amounts=self.pt_res_bs00[30][0:7],
available_cash=self.pt_res_bs00[30][7],
available_amounts=self.pt_res_bs00[30][0:7],
op=self.pt_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[30][7] + c_g + c_s
amounts = self.pt_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[59][7] + 10000,
own_amounts=self.pt_res_bs00[59][0:7],
available_cash=self.pt_res_bs00[59][7] + 10000,
available_amounts=self.pt_res_bs00[59][0:7],
op=self.pt_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.pt_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=self.pt_res_bs00[95][7],
own_amounts=self.pt_res_bs00[95][0:7],
available_cash=self.pt_res_bs00[95][7],
available_amounts=self.pt_res_bs00[95][0:7],
op=self.pt_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.pt_res_bs00[95][7] + c_g + c_s
amounts = self.pt_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=0,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.pt_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.pt_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.pt_res_bs00[97][0:7]))
def test_loop_step_ps_sb00(self):
""" test loop step PS-signal, sell first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[2][7],
own_amounts=self.ps_res_sb00[2][0:7],
available_cash=self.ps_res_sb00[2][7],
available_amounts=self.ps_res_sb00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[2][7] + c_g + c_s
amounts = self.ps_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[30][7],
own_amounts=self.ps_res_sb00[30][0:7],
available_cash=self.ps_res_sb00[30][7],
available_amounts=self.ps_res_sb00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[30][7] + c_g + c_s
amounts = self.ps_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[59][7] + 10000,
own_amounts=self.ps_res_sb00[59][0:7],
available_cash=self.ps_res_sb00[59][7] + 10000,
available_amounts=self.ps_res_sb00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_sb00[95][7],
own_amounts=self.ps_res_sb00[95][0:7],
available_cash=self.ps_res_sb00[95][7],
available_amounts=self.ps_res_sb00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_sb00[95][7] + c_g + c_s
amounts = self.ps_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_sb00[97][0:7]))
def test_loop_step_ps_bs00(self):
""" test loop step PS-signal, buy first"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.ps_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[2][7],
own_amounts=self.ps_res_bs00[2][0:7],
available_cash=self.ps_res_bs00[2][7],
available_amounts=self.ps_res_bs00[2][0:7],
op=self.ps_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[2][7] + c_g + c_s
amounts = self.ps_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[30][7],
own_amounts=self.ps_res_bs00[30][0:7],
available_cash=self.ps_res_bs00[30][7],
available_amounts=self.ps_res_bs00[30][0:7],
op=self.ps_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[30][7] + c_g + c_s
amounts = self.ps_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[59][7] + 10000,
own_amounts=self.ps_res_bs00[59][0:7],
available_cash=self.ps_res_bs00[59][7] + 10000,
available_amounts=self.ps_res_bs00[59][0:7],
op=self.ps_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.ps_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=self.ps_res_bs00[95][7],
own_amounts=self.ps_res_bs00[95][0:7],
available_cash=self.ps_res_bs00[95][7],
available_amounts=self.ps_res_bs00[95][0:7],
op=self.ps_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.ps_res_bs00[95][7] + c_g + c_s
amounts = self.ps_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=1,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.ps_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.ps_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.ps_res_bs00[97][0:7]))
def test_loop_step_vs_sb00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[2][7],
own_amounts=self.vs_res_sb00[2][0:7],
available_cash=self.vs_res_sb00[2][7],
available_amounts=self.vs_res_sb00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[2][7] + c_g + c_s
amounts = self.vs_res_sb00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[30][7],
own_amounts=self.vs_res_sb00[30][0:7],
available_cash=self.vs_res_sb00[30][7],
available_amounts=self.vs_res_sb00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[30][7] + c_g + c_s
amounts = self.vs_res_sb00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[59][7] + 10000,
own_amounts=self.vs_res_sb00[59][0:7],
available_cash=self.vs_res_sb00[59][7] + 10000,
available_amounts=self.vs_res_sb00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_sb00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_sb00[95][7],
own_amounts=self.vs_res_sb00[95][0:7],
available_cash=self.vs_res_sb00[95][7],
available_amounts=self.vs_res_sb00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_sb00[95][7] + c_g + c_s
amounts = self.vs_res_sb00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=True,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_sb00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_sb00[97][0:7]))
def test_loop_step_vs_bs00(self):
"""test loop step of Volume Signal type of signals"""
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=10000,
own_amounts=np.zeros(7, dtype='float'),
available_cash=10000,
available_amounts=np.zeros(7, dtype='float'),
op=self.vs_signals[0],
prices=self.prices[0],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash_change: +{c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = 10000 + c_g + c_s
amounts = np.zeros(7, dtype='float') + a_p + a_s
self.assertAlmostEqual(cash, 7750)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 500., 0, 0])))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[2][7],
own_amounts=self.vs_res_bs00[2][0:7],
available_cash=self.vs_res_bs00[2][7],
available_amounts=self.vs_res_bs00[2][0:7],
op=self.vs_signals[3],
prices=self.prices[3],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[2][7] + c_g + c_s
amounts = self.vs_res_bs00[2][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[3][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[3][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[30][7],
own_amounts=self.vs_res_bs00[30][0:7],
available_cash=self.vs_res_bs00[30][7],
available_amounts=self.vs_res_bs00[30][0:7],
op=self.vs_signals[31],
prices=self.prices[31],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[30][7] + c_g + c_s
amounts = self.vs_res_bs00[30][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[31][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[31][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[59][7] + 10000,
own_amounts=self.vs_res_bs00[59][0:7],
available_cash=self.vs_res_bs00[59][7] + 10000,
available_amounts=self.vs_res_bs00[59][0:7],
op=self.vs_signals[60],
prices=self.prices[60],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[59][7] + c_g + c_s + 10000
amounts = self.vs_res_bs00[59][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[60][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[60][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[61],
prices=self.prices[61],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[61][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[61][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=self.vs_res_bs00[95][7],
own_amounts=self.vs_res_bs00[95][0:7],
available_cash=self.vs_res_bs00[95][7],
available_amounts=self.vs_res_bs00[95][0:7],
op=self.vs_signals[96],
prices=self.prices[96],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = self.vs_res_bs00[95][7] + c_g + c_s
amounts = self.vs_res_bs00[95][0:7] + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[96][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[96][0:7]))
c_g, c_s, a_p, a_s, fee = qt.core._loop_step(signal_type=2,
own_cash=cash,
own_amounts=amounts,
available_cash=cash,
available_amounts=amounts,
op=self.vs_signals[97],
prices=self.prices[97],
rate=self.rate,
pt_buy_threshold=0.1,
pt_sell_threshold=0.1,
maximize_cash_usage=False,
allow_sell_short=False,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash_change: + {c_g:.2f} / {c_s:.2f}\n'
f'amount_changed: \npurchased: {np.round(a_p, 2)}\nsold:{np.round(a_s, 2)}\n'
f'----------------------------------\n')
cash = cash + c_g + c_s
amounts = amounts + a_p + a_s
self.assertAlmostEqual(cash, self.vs_res_bs00[97][7], 2)
self.assertTrue(np.allclose(amounts, self.vs_res_bs00[97][0:7]))
def test_loop_pt(self):
""" Test looping of PT proportion target signals, with
stock delivery delay = 0 days
cash delivery delay = 0 day
buy-sell sequence = sell first
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 0 days \n'
'cash delivery delay = 0 day \n'
'buy-sell sequence = sell first')
res = apply_loop(op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
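# Note: the bare third argument of np.allclose is rtol, so passing 2 here
# makes this a very loose relative-tolerance comparison; if a fixed number
# of decimals was intended, rtol/atol should be given explicitly.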
self.assertTrue(np.allclose(res, self.pt_res_bs00, 2))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
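# The two calls below pass (moq_buy, moq_sell) pairs of (0, 1) and (1, 5);
# apply_loop is expected to reject both, presumably because the buy and
# sell minimum order quantities must be consistent (e.g. moq_buy an
# integer multiple of moq_sell).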
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=0,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
# print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_pt_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PT proportion target signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 days \n'
'maximize cash usage = True \n'
'applicable because cash delivery period == 0')
res = apply_loop(
op_type=0,
op_list=self.pt_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.pt_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.pt_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.ps_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of PS proportion signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_ps_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of PS proportion signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 days \n'
'maximize cash usage = True \n'
'applicable because cash delivery period == 0')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.ps_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.ps_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.ps_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs(self):
""" Test looping of VS Volume Signal type of signals
"""
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res, self.vs_res_bs00, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 1 day
use_sell_cash = False
"""
print('Test looping of VS volume signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 1 day \n'
'maximize_cash = False (buy and sell at the same time)')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
cash_delivery_period=1,
stock_delivery_period=2,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_bs21[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_bs21, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_vs_with_delay_use_cash(self):
""" Test looping of PT proportion target signals, with:
stock delivery delay = 2 days
cash delivery delay = 0 day
use sell cash = True (sell stock first to use cash when possible
(not possible when cash delivery period != 0))
"""
print('Test looping of VS volume signals, with:\n'
'stock delivery delay = 2 days \n'
'cash delivery delay = 0 days \n'
'maximize cash usage = True \n'
'applicable because cash delivery period == 0')
res = apply_loop(
op_type=2,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
inflation_rate=0,
max_cash_usage=True,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.vs_res_sb20[i]))
print()
self.assertTrue(np.allclose(res, self.vs_res_sb20, 3))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.vs_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(
op_type=1,
op_list=self.vs_signal_hp,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=1,
stock_delivery_period=2,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
def test_loop_multiple_signal(self):
""" Test looping of PS Proportion Signal type of signals
"""
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=True,
inflation_rate=0,
print_log=False)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}\n'
f'result comparison line by line:')
for i in range(len(res)):
print(np.around(res.values[i]))
print(np.around(self.multi_res[i]))
print()
self.assertTrue(np.allclose(res, self.multi_res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
0,
self.ps_signal_hp,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq equal to 100')
res = apply_loop(op_type=1,
op_list=self.multi_signal_hp,
history_list=self.multi_history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
cash_delivery_period=0,
stock_delivery_period=2,
max_cash_usage=False,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestStrategy(unittest.TestCase):
""" test all properties and methods of strategy base class"""
def setUp(self) -> None:
pass
class TestLSStrategy(RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
class TestSelStrategy(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
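# reading of the steps below (assuming the data layout is share x day x htype):
# avg    - per-share mean of high/low/close over the sample window
# dif    - day-over-day change of the close prices (htype index 2)
# difper - latest non-NaN change divided by the average price, i.e. the change rate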
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSelStrategyDiffTime(SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
# TODO: This strategy is not working, find out why and improve
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
class MyStg(qt.RollingTiming):
"""自定义双均线择时策略策略"""
def __init__(self):
"""这个均线择时策略只有三个参数:
- SMA 慢速均线,所选择的股票
- FMA 快速均线
- M 边界值
策略的其他说明
"""
"""
必须初始化的关键策略参数清单:
"""
super().__init__(
pars=(20, 100, 0.01),
par_count=3,
par_types=['discr', 'discr', 'conti'],
par_bounds_or_enums=[(10, 250), (10, 250), (0.0, 0.5)],
stg_name='CUSTOM ROLLING TIMING STRATEGY',
stg_text='Customized Rolling Timing Strategy for Testing',
data_types='close',
window_length=100,
)
print(f'=====================\n====================\n'
f'custom strategy initialized, \npars: {self.pars}\npar_count:{self.par_count}\npar_types:'
f'{self.par_types}\n'
f'{self.info()}')
# the concrete implementation of the strategy goes into its _realize() function
# this function always takes two arguments: hist_price, the historical data of the given portfolio, and params, the concrete strategy parameters
def _realize(self, hist_price, params):
"""策略的具体实现代码:
s:短均线计算日期;l:长均线计算日期;m:均线边界宽度;hesitate:均线跨越类型"""
f, s, m = params
# temporary workaround: slice the incoming data inside the strategy implementation; later the slicing should happen outside the implementation so that the data passed in already matches the data_types parameter
h = hist_price.T
# compute the current values of the slow and fast moving averages
s_ma = qt.sma(h[0], s)[-1]
f_ma = qt.sma(h[0], f)[-1]
# compute the stop band of the slow MA; when the fast MA is inside this band, close all positions and emit no buy/sell signal
s_ma_u = s_ma * (1 + m)
s_ma_l = s_ma * (1 - m)
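# example (assumed values, for illustration only): with m=0.01 and s_ma=10.0 the stop
# band is [9.9, 10.1]; f_ma=10.2 -> 1 (long), f_ma=10.05 -> 0 (flat), f_ma=9.8 -> -1 (short)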
# produce long/short/empty marks at different levels depending on the hesitate mode
if f_ma > s_ma_u:  # fast MA above the slow-MA stop band: hold a long position
return 1
elif s_ma_l < f_ma < s_ma_u:  # fast MA inside the stop band: close positions
return 0
else:  # f_ma < s_ma_l, fast MA below the slow-MA stop band: hold a short position
return -1
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing HistoryPanel object\n')
# build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values in some days
# for some shares in the pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_tyeps,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(strategies='dma', signal_type='PS')
self.op2 = qt.Operator(strategies='dma, macd, trix')
def test_init(self):
""" test initialization of Operator class"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.signal_type, 'pt')
self.assertIsInstance(op.strategies, list)
self.assertEqual(len(op.strategies), 0)
op = qt.Operator('dma')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies, list)
self.assertIsInstance(op.strategies[0], TimingDMA)
op = qt.Operator('dma, macd')
self.assertIsInstance(op, qt.Operator)
op = qt.Operator(['dma', 'macd'])
self.assertIsInstance(op, qt.Operator)
def test_repr(self):
""" test basic representation of Opeartor class"""
op = qt.Operator()
self.assertEqual(op.__repr__(), 'Operator()')
op = qt.Operator('macd, dma, trix, random, avg_low')
self.assertEqual(op.__repr__(), 'Operator(macd, dma, trix, random, avg_low)')
self.assertEqual(op['dma'].__repr__(), 'Q-TIMING(DMA)')
self.assertEqual(op['macd'].__repr__(), 'R-TIMING(MACD)')
self.assertEqual(op['trix'].__repr__(), 'R-TIMING(TRIX)')
self.assertEqual(op['random'].__repr__(), 'SELECT(RANDOM)')
self.assertEqual(op['avg_low'].__repr__(), 'FACTOR(AVG LOW)')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
self.op.info()
def test_get_strategy_by_id(self):
""" test get_strategy_by_id()"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op.get_strategy_by_id('macd'), op.strategies[0])
self.assertIs(op.get_strategy_by_id(1), op.strategies[1])
self.assertIs(op.get_strategy_by_id('trix'), op.strategies[2])
def test_get_items(self):
""" test method __getitem__(), it should be the same as geting strategies by id"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
self.assertEqual(op.strategy_ids, ['macd', 'dma', 'trix'])
self.assertIs(op['macd'], op.strategies[0])
self.assertIs(op['trix'], op.strategies[2])
self.assertIs(op[1], op.strategies[1])
self.assertIs(op[3], op.strategies[2])
def test_get_strategies_by_price_type(self):
""" test get_strategies_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategies_by_price_type('close')
stg_open = op.get_strategies_by_price_type('open')
stg_high = op.get_strategies_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, [op.strategies[1]])
self.assertEqual(stg_open, [op.strategies[0], op.strategies[2]])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategies_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_count_by_price_type(self):
""" test get_strategy_count_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_count_by_price_type('close')
stg_open = op.get_strategy_count_by_price_type('open')
stg_high = op.get_strategy_count_by_price_type('high')
self.assertIsInstance(stg_close, int)
self.assertIsInstance(stg_open, int)
self.assertIsInstance(stg_high, int)
self.assertEqual(stg_close, 1)
self.assertEqual(stg_open, 2)
self.assertEqual(stg_high, 0)
stg_wrong = op.get_strategy_count_by_price_type(123)
self.assertIsInstance(stg_wrong, int)
self.assertEqual(stg_wrong, 0)
def test_get_strategy_names_by_price_type(self):
""" test get_strategy_names_by_price_type"""
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_names_by_price_type('close')
stg_open = op.get_strategy_names_by_price_type('open')
stg_high = op.get_strategy_names_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['DMA'])
self.assertEqual(stg_open, ['MACD', 'TRIX'])
self.assertEqual(stg_high, [])
stg_wrong = op.get_strategy_names_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_get_strategy_id_by_price_type(self):
""" test get_strategy_IDs_by_price_type"""
print('-----Test get strategy IDs by price type------\n')
op = qt.Operator()
self.assertIsInstance(op, qt.Operator)
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op = qt.Operator('macd, dma, trix')
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='close')
op.set_parameter('trix', price_type='open')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
self.assertIsInstance(stg_close, list)
self.assertIsInstance(stg_open, list)
self.assertIsInstance(stg_high, list)
self.assertEqual(stg_close, ['dma'])
self.assertEqual(stg_open, ['macd', 'trix'])
self.assertEqual(stg_high, [])
op.add_strategies('dma, macd')
op.set_parameter('dma_1', price_type='open')
op.set_parameter('macd', price_type='open')
op.set_parameter('macd_1', price_type='high')
op.set_parameter('trix', price_type='close')
print(f'Operator strategy id:\n'
f'{op.strategies} on memory pos:\n'
f'{[id(stg) for stg in op.strategies]}')
stg_close = op.get_strategy_id_by_price_type('close')
stg_open = op.get_strategy_id_by_price_type('open')
stg_high = op.get_strategy_id_by_price_type('high')
stg_all = op.get_strategy_id_by_price_type()
print(f'All IDs of strategies:\n'
f'{stg_all}\n'
f'All price types of strategies:\n'
f'{[stg.price_type for stg in op.strategies]}')
self.assertEqual(stg_close, ['dma', 'trix'])
self.assertEqual(stg_open, ['macd', 'dma_1'])
self.assertEqual(stg_high, ['macd_1'])
stg_wrong = op.get_strategy_id_by_price_type(123)
self.assertIsInstance(stg_wrong, list)
self.assertEqual(stg_wrong, [])
def test_property_strategies(self):
""" test property strategies"""
print(f'created a new simple Operator with only one strategy: DMA')
op = qt.Operator('dma')
strategies = op.strategies
self.assertIsInstance(strategies, list)
op.info()
print(f'created the second simple Operator with three strategies')
self.assertIsInstance(strategies[0], TimingDMA)
op = qt.Operator('dma, macd, cdl')
strategies = op.strategies
op.info()
self.assertIsInstance(strategies, list)
self.assertIsInstance(strategies[0], TimingDMA)
self.assertIsInstance(strategies[1], TimingMACD)
self.assertIsInstance(strategies[2], TimingCDL)
def test_property_strategy_count(self):
""" test Property strategy_count, and the method get_strategy_count_by_price_type()"""
self.assertEqual(self.op.strategy_count, 1)
self.assertEqual(self.op2.strategy_count, 3)
self.assertEqual(self.op.get_strategy_count_by_price_type(), 1)
self.assertEqual(self.op2.get_strategy_count_by_price_type(), 3)
self.assertEqual(self.op.get_strategy_count_by_price_type('close'), 1)
self.assertEqual(self.op.get_strategy_count_by_price_type('high'), 0)
self.assertEqual(self.op2.get_strategy_count_by_price_type('close'), 3)
self.assertEqual(self.op2.get_strategy_count_by_price_type('open'), 0)
def test_property_strategy_names(self):
""" test property strategy_ids"""
op = qt.Operator('dma')
self.assertIsInstance(op.strategy_ids, list)
names = op.strategy_ids[0]
print(f'names are {names}')
self.assertEqual(names, 'dma')
op = qt.Operator('dma, macd, trix, cdl')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'cdl')
op = qt.Operator('dma, macd, trix, dma, dma')
self.assertIsInstance(op.strategy_ids, list)
self.assertEqual(op.strategy_ids[0], 'dma')
self.assertEqual(op.strategy_ids[1], 'macd')
self.assertEqual(op.strategy_ids[2], 'trix')
self.assertEqual(op.strategy_ids[3], 'dma_1')
self.assertEqual(op.strategy_ids[4], 'dma_2')
def test_property_strategy_blenders(self):
""" test property strategy blenders including property setter,
and test the method get_blender()"""
print(f'------- Test property strategy blenders ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
# test adding blender to empty operator
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op.add_strategy('dma')
op.strategy_blenders = '1+2'
self.assertEqual(op.strategy_blenders, {'close': ['+', '2', '1']})
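# note: blender strings appear to be stored as reverse-Polish (postfix) token lists,
# e.g. '1+2' becomes ['+', '2', '1'] with the operands in reversed order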
op.clear_strategies()
self.assertEqual(op.strategy_blenders, {})
op.add_strategies('dma, trix, macd, dma')
op.set_parameter('dma', price_type='open')
op.set_parameter('trix', price_type='high')
op.set_blender('open', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
op.set_blender('open', '1+2+3')
op.set_blender('abc', '1+2+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
blender_abc = op.get_blender('abc')
self.assertEqual(op.strategy_blenders, {'open': ['+', '3', '+', '2', '1']})
self.assertEqual(blender_open, ['+', '3', '+', '2', '1'])
self.assertEqual(blender_close, None)
self.assertEqual(blender_high, None)
self.assertEqual(blender_abc, None)
op.set_blender('open', 123)
blender_open = op.get_blender('open')
self.assertEqual(blender_open, [])
op.set_blender(None, '1+1')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
self.assertEqual(op.get_blender(), {'close': ['+', '1', '1'],
'open': ['+', '1', '1'],
'high': ['+', '1', '1']})
self.assertEqual(blender_open, ['+', '1', '1'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '1', '1'])
op.set_blender(None, ['1+1', '3+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '3'])
self.assertEqual(blender_close, ['+', '1', '1'])
self.assertEqual(blender_high, ['+', '4', '3'])
self.assertEqual(op.view_blender('open'), '3+4')
self.assertEqual(op.view_blender('close'), '1+1')
self.assertEqual(op.view_blender('high'), '3+4')
op.strategy_blenders = (['1+2', '2*3', '1+4'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
self.assertEqual(op.view_blender('open'), '1+4')
self.assertEqual(op.view_blender('close'), '1+2')
self.assertEqual(op.view_blender('high'), '2*3')
# test error inputs:
# wrong type of price_type
self.assertRaises(TypeError, op.set_blender, 1, '1+3')
# price_type not found, no change is made
op.set_blender('volume', '1+3')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# price_type not valid, no change is made
op.set_blender('closee', '1+2')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, ['+', '4', '1'])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('open', 55)
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, ['+', '2', '1'])
self.assertEqual(blender_high, ['*', '3', '2'])
# wrong type of blender, set to empty list
op.set_blender('close', ['1+2'])
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, ['*', '3', '2'])
# can't parse blender, set to empty list
op.set_blender('high', 'a+bc')
blender_open = op.get_blender('open')
blender_close = op.get_blender('close')
blender_high = op.get_blender('high')
self.assertEqual(blender_open, [])
self.assertEqual(blender_close, [])
self.assertEqual(blender_high, [])
def test_property_singal_type(self):
""" test property signal_type"""
op = qt.Operator()
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'pt')
op = qt.Operator(signal_type='ps')
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='PS')
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator(signal_type='proportion signal')
self.assertEqual(op.signal_type, 'ps')
print(f'"pt" will be the default type if wrong value is given')
op = qt.Operator(signal_type='wrong value')
self.assertEqual(op.signal_type, 'pt')
print(f'test signal_type.setter')
op.signal_type = 'ps'
self.assertEqual(op.signal_type, 'ps')
print(f'test error raising')
self.assertRaises(TypeError, setattr, op, 'signal_type', 123)
self.assertRaises(ValueError, setattr, op, 'signal_type', 'wrong value')
def test_property_op_data_types(self):
""" test property op_data_types"""
op = qt.Operator()
self.assertIsInstance(op.op_data_types, list)
self.assertEqual(op.op_data_types, [])
op = qt.Operator('macd, dma, trix')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
op = qt.Operator('macd, cdl')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
op.add_strategy('dma')
dt = op.op_data_types
self.assertEqual(dt[0], 'close')
self.assertEqual(dt[1], 'high')
self.assertEqual(dt[2], 'low')
self.assertEqual(dt[3], 'open')
self.assertEqual(dt, ['close', 'high', 'low', 'open'])
def test_property_op_data_type_count(self):
""" test property op_data_type_count"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_count, int)
self.assertEqual(op.op_data_type_count, 0)
op = qt.Operator('macd, dma, trix')
dtn = op.op_data_type_count
self.assertEqual(dtn, 1)
op = qt.Operator('macd, cdl')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
op.add_strategy('dma')
dtn = op.op_data_type_count
self.assertEqual(dtn, 4)
def test_property_op_data_freq(self):
""" test property op_data_freq"""
op = qt.Operator()
self.assertIsInstance(op.op_data_freq, str)
self.assertEqual(len(op.op_data_freq), 0)
self.assertEqual(op.op_data_freq, '')
op = qt.Operator('macd, dma, trix')
dtf = op.op_data_freq
self.assertIsInstance(dtf, str)
self.assertEqual(dtf[0], 'd')
op.set_parameter('macd', data_freq='m')
dtf = op.op_data_freq
self.assertIsInstance(dtf, list)
self.assertEqual(len(dtf), 2)
self.assertEqual(dtf[0], 'd')
self.assertEqual(dtf[1], 'm')
def test_property_bt_price_types(self):
""" test property bt_price_types"""
print('------test property bt_price_types-------')
op = qt.Operator()
self.assertIsInstance(op.bt_price_types, list)
self.assertEqual(len(op.bt_price_types), 0)
self.assertEqual(op.bt_price_types, [])
op = qt.Operator('macd, dma, trix')
btp = op.bt_price_types
self.assertIsInstance(btp, list)
self.assertEqual(btp[0], 'close')
op.set_parameter('macd', price_type='open')
btp = op.bt_price_types
btpc = op.bt_price_type_count
print(f'price_types are \n{btp}')
self.assertIsInstance(btp, list)
self.assertEqual(len(btp), 2)
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.add_strategies(['dma', 'macd'])
op.set_parameter('dma_1', price_type='high')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'high')
self.assertEqual(btp[2], 'open')
self.assertEqual(btpc, 3)
op.remove_strategy('dma_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
op.remove_strategy('macd_1')
btp = op.bt_price_types
btpc = op.bt_price_type_count
self.assertEqual(btp[0], 'close')
self.assertEqual(btp[1], 'open')
self.assertEqual(btpc, 2)
def test_property_op_data_type_list(self):
""" test property op_data_type_list"""
op = qt.Operator()
self.assertIsInstance(op.op_data_type_list, list)
self.assertEqual(len(op.op_data_type_list), 0)
self.assertEqual(op.op_data_type_list, [])
op = qt.Operator('macd, dma, trix, cdl')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(ohd[0], ['close'])
op.set_parameter('macd', data_types='open, close')
ohd = op.op_data_type_list
print(f'ohd is {ohd}')
self.assertIsInstance(ohd, list)
self.assertEqual(len(ohd), 4)
self.assertEqual(ohd[0], ['open', 'close'])
self.assertEqual(ohd[1], ['close'])
self.assertEqual(ohd[2], ['close'])
self.assertEqual(ohd[3], ['open', 'high', 'low', 'close'])
def test_property_op_history_data(self):
""" Test this important function to get operation history data that shall be used in
signal generation
these data are stored in list of nd-arrays, each ndarray represents the data
that is needed for each and every strategy
"""
print(f'------- Test getting operation history data ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.op_history_data, {})
self.assertEqual(op.signal_type, 'pt')
def test_property_opt_space_par(self):
""" test property opt_space_par"""
print(f'-----test property opt_space_par--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_space_par, tuple)
self.assertIsInstance(op.opt_space_par[0], list)
self.assertIsInstance(op.opt_space_par[1], list)
self.assertEqual(len(op.opt_space_par), 2)
self.assertEqual(op.opt_space_par, ([], []))
op = qt.Operator('macd, dma, trix, cdl')
osp = op.opt_space_par
print(f'before setting opt_tags opt_space_par is empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(osp[0], [])
self.assertEqual(osp[1], [])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
osp = op.opt_space_par
print(f'after setting opt_tags opt_space_par is not empty:\n'
f'osp is {osp}\n')
self.assertIsInstance(osp, tuple)
self.assertEqual(len(osp), 2)
self.assertIsInstance(osp[0], list)
self.assertIsInstance(osp[1], list)
self.assertEqual(len(osp[0]), 6)
self.assertEqual(len(osp[1]), 6)
self.assertEqual(osp[0], [(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
self.assertEqual(osp[1], ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
def test_property_opt_types(self):
""" test property opt_tags"""
print(f'-----test property opt_tags--------:\n')
op = qt.Operator()
self.assertIsInstance(op.opt_tags, list)
self.assertEqual(len(op.opt_tags), 0)
self.assertEqual(op.opt_tags, [])
op = qt.Operator('macd, dma, trix, cdl')
otp = op.opt_tags
print(f'before setting opt_tags, all opt_tags are 0:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(otp, [0, 0, 0, 0])
op.set_parameter('macd', opt_tag=1)
op.set_parameter('dma', opt_tag=1)
otp = op.opt_tags
print(f'after setting opt_tags, the opt_tags list is updated:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, list)
self.assertEqual(len(otp), 4)
self.assertEqual(otp, [1, 1, 0, 0])
def test_property_max_window_length(self):
""" test property max_window_length"""
print(f'-----test property max window length--------:\n')
op = qt.Operator()
self.assertIsInstance(op.max_window_length, int)
self.assertEqual(op.max_window_length, 0)
op = qt.Operator('macd, dma, trix, cdl')
mwl = op.max_window_length
print(f'before setting window_length the value is 270:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 270)
op.set_parameter('macd', window_length=300)
op.set_parameter('dma', window_length=350)
mwl = op.max_window_length
print(f'after setting window_length the value is new set value:\n'
f'mwl is {mwl}\n')
self.assertIsInstance(mwl, int)
self.assertEqual(mwl, 350)
def test_property_bt_price_type_count(self):
""" test property bt_price_type_count"""
print(f'-----test property bt_price_type_count--------:\n')
op = qt.Operator()
self.assertIsInstance(op.bt_price_type_count, int)
self.assertEqual(op.bt_price_type_count, 0)
op = qt.Operator('macd, dma, trix, cdl')
otp = op.bt_price_type_count
print(f'before setting price_type the price count is 1:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 1)
op.set_parameter('macd', price_type='open')
op.set_parameter('dma', price_type='open')
otp = op.bt_price_type_count
print(f'after setting price_type the price type count is 2:\n'
f'otp is {otp}\n')
self.assertIsInstance(otp, int)
self.assertEqual(otp, 2)
def test_property_set(self):
""" test all property setters:
setting following properties:
- strategy_blenders
- signal_type
other properties cannot be set"""
print(f'------- Test setting properties ---------')
op = qt.Operator()
self.assertIsInstance(op.strategy_blenders, dict)
self.assertIsInstance(op.signal_type, str)
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'pt')
op.strategy_blenders = '1 + 2'
op.signal_type = 'proportion signal'
self.assertEqual(op.strategy_blenders, {})
self.assertEqual(op.signal_type, 'ps')
op = qt.Operator('macd, dma, trix, cdl')
# TODO: modify set_parameter() so that the usage below becomes valid
# a_to_sell.set_parameter('dma, cdl', price_type='open')
op.set_parameter('dma', price_type='open')
op.set_parameter('cdl', price_type='open')
sb = op.strategy_blenders
st = op.signal_type
self.assertIsInstance(sb, dict)
print(f'before setting: strategy_blenders={sb}')
self.assertEqual(sb, {})
op.strategy_blenders = '1+2 * 3'
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '*', '3', '2', '1'],
'open': ['+', '*', '3', '2', '1']})
op.strategy_blenders = ['1+2', '3-4']
sb = op.strategy_blenders
print(f'after setting strategy_blender={sb}')
self.assertEqual(sb, {'close': ['+', '2', '1'],
'open': ['-', '4', '3']})
def test_operator_ready(self):
"""test the method ready of Operator"""
op = qt.Operator()
print(f'operator is ready? "{op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[1], qt.SelectingAll)
self.assertIsInstance(op.strategies[2], qt.RiconUrgent)
self.assertIsInstance(op[0], qt.TimingDMA)
self.assertIsInstance(op[1], qt.SelectingAll)
self.assertIsInstance(op[2], qt.RiconUrgent)
self.assertIsInstance(op['dma'], qt.TimingDMA)
self.assertIsInstance(op['all'], qt.SelectingAll)
self.assertIsInstance(op['urgent'], qt.RiconUrgent)
self.assertEqual(op.strategy_count, 3)
print(f'test adding strategies into existing op')
print('test adding strategy by string')
op.add_strategy('macd')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingMACD)
self.assertEqual(op.strategy_count, 4)
op.add_strategy('random')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.SelectingRandom)
self.assertEqual(op.strategy_count, 5)
test_ls = TestLSStrategy()
op.add_strategy(test_ls)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], TestLSStrategy)
self.assertEqual(op.strategy_count, 6)
print(f'Test that different instances of objects are added to operator')
op.add_strategy('dma')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingDMA)
self.assertIsNot(op.strategies[0], op.strategies[6])
def test_operator_add_strategies(self):
""" etst adding multiple strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertEqual(op.strategy_count, 3)
print('test adding multiple strategies -- adding strategy by list of strings')
op.add_strategies(['dma', 'macd'])
self.assertEqual(op.strategy_count, 5)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by comma separated strings')
op.add_strategies('dma, macd')
self.assertEqual(op.strategy_count, 7)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategies')
op.add_strategies([qt.TimingDMA(), qt.TimingMACD()])
self.assertEqual(op.strategy_count, 9)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[7], qt.TimingDMA)
self.assertIsInstance(op.strategies[8], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategy and str')
op.add_strategies(['DMA', qt.TimingMACD()])
self.assertEqual(op.strategy_count, 11)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[9], qt.TimingDMA)
self.assertIsInstance(op.strategies[10], qt.TimingMACD)
self.assertIsNot(op.strategies[0], op.strategies[9])
self.assertIs(type(op.strategies[0]), type(op.strategies[9]))
print('test adding faulty data')
self.assertRaises(AssertionError, op.add_strategies, 123)
self.assertRaises(AssertionError, op.add_strategies, None)
def test_opeartor_remove_strategy(self):
""" test method remove strategy"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.remove_strategy('dma')
self.assertEqual(op.strategy_count, 6)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'dma_1', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['dma_1'])
self.assertEqual(op.strategies[3], op['macd'])
self.assertEqual(op.strategies[4], op['dma_2'])
self.assertEqual(op.strategies[5], op['custom'])
op.remove_strategy('dma_1')
self.assertEqual(op.strategy_count, 5)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['macd'])
self.assertEqual(op.strategies[3], op['dma_2'])
self.assertEqual(op.strategies[4], op['custom'])
def test_opeartor_clear_strategies(self):
""" test operator clear strategies"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op.add_strategy('dma', pars=(12, 123, 25))
self.assertEqual(op.strategy_count, 1)
self.assertEqual(op.strategy_ids, ['dma'])
self.assertEqual(type(op.strategies[0]), TimingDMA)
self.assertEqual(op.strategies[0].pars, (12, 123, 25))
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
def test_operator_prepare_data(self):
"""test processes that related to prepare data"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(strategies=[test_ls, test_sel, test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
# calling prepare_data before all strategy parameters have been set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='custom',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.assertEqual(self.op.strategies[0].pars, {'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='custom_1',
pars=())
self.assertEqual(self.op.strategies[1].pars, ()),
self.op.set_parameter(stg_id='custom_2',
pars=(0.2, 0.02, -0.02))
self.assertEqual(self.op.strategies[2].pars, (0.2, 0.02, -0.02)),
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._op_history_data, dict)
self.assertEqual(len(self.op._op_history_data), 3)
# test if automatic strategy blenders are set
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '2', '+', '1', '0']})
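# the automatically generated blender appears to simply sum the outputs of all
# strategies sharing a price type, i.e. '0+1+2' in reverse-Polish form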
tim_hist_data = self.op._op_history_data['custom']
sel_hist_data = self.op._op_history_data['custom_1']
ric_hist_data = self.op._op_history_data['custom_2']
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises Value Error if empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
""" Test signal generation process of operator objects
:return:
"""
# create three trading strategies using the custom strategy classes defined in this test module
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sel2 = TestSelStrategyDiffTime()
test_sig = TestSigStrategy()
print('--Test PT type signal generation--')
# test PT-type signal generation:
# create an Operator object whose signal type is PT (proportion target signals)
# the Operator contains two strategies, an LS-Strategy and a Sel-Strategy, representing timing and stock-selection strategies
# each strategy generates PT signals which are then blended into a single signal output
self.op = qt.Operator(strategies=[test_ls, test_sel])
self.op.set_parameter(stg_id='custom',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id=1,
pars=())
# self.a_to_sell.set_blender(blender='0+1+2')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test operator information in normal mode--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '1', '0']})
self.op.set_blender(None, '0*1')
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
backtest_price_types = op_list.htypes
self.assertEqual(backtest_price_types[0], 'close')
self.assertEqual(op_list.shape, (3, 45, 1))
reduced_op_list = op_list.values.squeeze().T
print(f'op_list created, it is a 3-share/45-day/1-htype array; to make the comparison possible, \n'
f'it will be squeezed to a 2-d array and compared share by share:\n'
f'{reduced_op_list}')
target_op_values = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
self.assertTrue(np.allclose(target_op_values, reduced_op_list, equal_nan=True))
print('--Test two separate signal generation for different price types--')
# test two separate sets of PT-type signal generation:
# add two more strategies to the Operator object; the strategy types are the same but the parameters differ, and the backtest price type is "open"
# the Operator should generate two sets of trading signals, one for each of the two price types "close" and "open"
# two brand-new strategy objects must be created here, otherwise the op's strategies list would hold duplicate object references and cause errors
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
self.op.add_strategies([test_ls, test_sel])
self.op.set_parameter(stg_id='custom_2',
price_type='open')
self.op.set_parameter(stg_id='custom_3',
price_type='open')
self.assertEqual(self.op['custom'].price_type, 'close')
self.assertEqual(self.op['custom_2'].price_type, 'open')
self.op.set_parameter(stg_id='custom_2',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id='custom_3',
pars=())
self.op.set_blender(blender='0 or 1', price_type='open')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test how operator information is printed out--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0'],
'open': ['or', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
signal_close = op_list['close'].squeeze().T
signal_open = op_list['open'].squeeze().T
self.assertEqual(signal_close.shape, (45, 3))
self.assertEqual(signal_open.shape, (45, 3))
target_op_close = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
target_op_open = np.array([[0.5, 0.5, 1.0],
[0.5, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 0.5, 0.0],
[1.0, 0.5, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.5, 1.0, 0.0],
[0.5, 1.0, 0.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0]])
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_close), list(signal_close))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_close, signal_close, equal_nan=True))
signal_pairs = [[list(sig1), list(sig2), sig1 == sig2]
for sig1, sig2
in zip(list(target_op_open), list(signal_open))]
print(f'signals side by side:\n'
f'{signal_pairs}')
self.assertTrue(np.allclose(target_op_open, signal_open, equal_nan=True))
print('--Test two separate signal generation for different price types--')
# more test cases to follow
def test_stg_parameter_setting(self):
""" test setting parameters of strategies
test the method set_parameters
:return:
"""
op = qt.Operator(strategies='dma, all, urgent')
print(op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{op.strategies[0].info()}')
# TODO: allow set_parameters to a list of strategies or str-listed strategies
# TODO: allow set_parameters to all strategies of specific bt price type
print(f'Set up strategy parameters by strategy id')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
op.set_parameter('all',
window_length=20)
op.set_parameter('all', price_type='high')
print(f'Can also set up strategy parameters by strategy index')
op.set_parameter(2, price_type='open')
op.set_parameter(2,
opt_tag=1,
pars=(9, -0.09),
window_length=10)
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(op.strategies[2].pars, (9, -0.09))
self.assertEqual(op.op_data_freq, 'd')
self.assertEqual(op.op_data_types, ['close', 'high', 'open'])
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.max_window_length, 20)
print(f'KeyError will be raised if wrong strategy id is given')
self.assertRaises(KeyError, op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(KeyError, op.set_parameter, stg_id='wrong_input', pars=(1, 2))
print(f'ValueError will be raised if invalid parameter values are given')
self.assertRaises(ValueError, op.set_parameter, stg_id=0, pars=('wrong input', 'wrong input'))
# test blenders of different price types
# test setting blenders to different price types
# TODO: to allow operands like "and", "or", "not", "xor"
# a_to_sell.set_blender('close', '0 and 1 or 2')
# self.assertEqual(a_to_sell.get_blender('close'), 'str-1.2')
self.assertEqual(op.bt_price_types, ['close', 'high', 'open'])
op.set_blender('open', '0 & 1 | 2')
self.assertEqual(op.get_blender('open'), ['|', '2', '&', '1', '0'])
op.set_blender('high', '(0|1) & 2')
self.assertEqual(op.get_blender('high'), ['&', '2', '|', '1', '0'])
op.set_blender('close', '0 & 1 | 2')
self.assertEqual(op.get_blender(), {'close': ['|', '2', '&', '1', '0'],
'high': ['&', '2', '|', '1', '0'],
'open': ['|', '2', '&', '1', '0']})
self.assertEqual(op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (1, 40), (-0.5, 0.5)],
['discr', 'discr', 'discr', 'discr', 'conti']))
self.assertEqual(op.opt_tags, [1, 0, 1])
def test_signal_blend(self):
self.assertEqual(blender_parser('0 & 1'), ['&', '1', '0'])
self.assertEqual(blender_parser('0 or 1'), ['or', '1', '0'])
self.assertEqual(blender_parser('0 & 1 | 2'), ['|', '2', '&', '1', '0'])
blender = blender_parser('0 & 1 | 2')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 1)
self.assertEqual(signal_blend([0, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '0 & ( 1 | 2 )'
self.assertEqual(blender_parser('0 & ( 1 | 2 )'), ['&', '|', '2', '1', '0'])
blender = blender_parser('0 & ( 1 | 2 )')
self.assertEqual(signal_blend([1, 1, 1], blender), 1)
self.assertEqual(signal_blend([1, 0, 1], blender), 1)
self.assertEqual(signal_blend([1, 1, 0], blender), 1)
self.assertEqual(signal_blend([0, 1, 1], blender), 0)
self.assertEqual(signal_blend([0, 0, 1], blender), 0)
self.assertEqual(signal_blend([1, 0, 0], blender), 0)
self.assertEqual(signal_blend([0, 1, 0], blender), 0)
self.assertEqual(signal_blend([0, 0, 0], blender), 0)
# parse: '(1-2)/3 + 0'
self.assertEqual(blender_parser('(1-2)/3 + 0'), ['+', '0', '/', '3', '-', '2', '1'])
blender = blender_parser('(1-2)/3 + 0')
self.assertEqual(signal_blend([5, 9, 1, 4], blender), 7)
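# note: the digits in a blender expression are strategy indices into the signal list,
# so with signals [5, 9, 1, 4] the expression (1-2)/3 + 0 evaluates to (9-1)/4 + 5 = 7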
# pars: '(0*1/2*(3+4))+5*(6+7)-8'
self.assertEqual(blender_parser('(0*1/2*(3+4))+5*(6+7)-8'), ['-', '8', '+', '*', '+', '7', '6', '5', '*',
'+', '4', '3', '/', '2', '*', '1', '0'])
blender = blender_parser('(0*1/2*(3+4))+5*(6+7)-8')
self.assertEqual(signal_blend([1, 1, 1, 1, 1, 1, 1, 1, 1], blender), 3)
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 14)
# parse: '0/max(2,1,3 + 5)+4'
self.assertEqual(blender_parser('0/max(2,1,3 + 5)+4'), ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
self.assertEqual(signal_blend([8.0, 4, 3, 5.0, 0.125, 5], blender), 0.925)
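# worked through: max() takes a variable number of arguments; with signals
# [8.0, 4, 3, 5.0, 0.125, 5] the expression is 8.0/max(3, 4, 5.0+5) + 0.125 = 8.0/10 + 0.125 = 0.925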
self.assertEqual(signal_blend([2, 1, 4, 3, 5, 5, 2, 2, 10], blender), 5.25)
print('speed test')
import time
st = time.time()
blender = blender_parser('0+max(1,2,(3+4)*5, max(6, (7+8)*9), 10-11) * (12+13)')
res = []
for i in range(10000):
res = signal_blend([1, 1, 2, 3, 4, 5, 3, 4, 5, 6, 7, 8, 2, 3], blender)
et = time.time()
print(f'total time for RPN processing: {et - st}, got result: {res}')
blender = blender_parser("0 + 1 * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 7)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0+1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
blender = blender_parser("(0 + 1) * 2")
self.assertEqual(signal_blend([1, 2, 3], blender), 9)
# TODO: expressions with a leading unary minus such as -(1+2) cannot be handled yet
# self.a_to_sell.set_blender('selecting', "-(0 + 1) * 2")
# self.assertEqual(self.a_to_sell.signal_blend([1, 2, 3]), -9)
blender = blender_parser("(0-1)/2 + 3")
print(f'RPN of notation: "(0-1)/2 + 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 2, 3, 0.0], blender), -0.33333333)
blender = blender_parser("0 + 1 / 2")
print(f'RPN of notation: "0 + 1 / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, math.pi, 4], blender), 1.78539816)
blender = blender_parser("(0 + 1) / 2")
print(f'RPN of notation: "(0 + 1) / 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 2, 3], blender), 1)
blender = blender_parser("(0 + 1 * 2) / 3")
print(f'RPN of notation: "(0 + 1 * 2) / 3" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([3, math.e, 10, 10], blender), 3.0182818284590454)
blender = blender_parser("0 / 1 * 2")
print(f'RPN of notation: "0 / 1 * 2" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(signal_blend([1, 3, 6], blender), 2)
blender = blender_parser("(0 - 1 + 2) * 4")
print(f'RPN of notation: "(0 - 1 + 2) * 4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([1, 1, -1, np.nan, math.pi], blender), -3.141592653589793)
blender = blender_parser("0 * 1")
print(f'RPN of notation: "0 * 1" is:\n'
f'{" ".join(blender[::-1])}')
self.assertAlmostEqual(signal_blend([math.pi, math.e], blender), 8.539734222673566)
blender = blender_parser('abs(3-sqrt(2) / cos(1))')
print(f'RPN of notation: "abs(3-sqrt(2) / cos(1))" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['abs(1)', '-', '/', 'cos(1)', '1', 'sqrt(1)', '2', '3'])
blender = blender_parser('0/max(2,1,3 + 5)+4')
print(f'RPN of notation: "0/max(2,1,3 + 5)+4" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '4', '/', 'max(3)', '+', '5', '3', '1', '2', '0'])
blender = blender_parser('1 + sum(1,2,3+3, sum(1, 2) + 3) *5')
print(f'RPN of notation: "1 + sum(1,2,3+3, sum(1, 2) + 3) *5" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '5', 'sum(4)', '+', '3', 'sum(2)', '2', '1',
'+', '3', '3', '2', '1', '1'])
blender = blender_parser('1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)')
print(f'RPN of notation: "1+sum(1,2,(3+5)*4, sum(3, (4+5)*6), 7-8) * (2+3)" is:\n'
f'{" ".join(blender[::-1])}')
self.assertEqual(blender, ['+', '*', '+', '3', '2', 'sum(5)', '-', '8', '7',
'sum(2)', '*', '6', '+', '5', '4', '3', '*', '4',
'+', '5', '3', '2', '1', '1'])
# TODO: ndarray type of signals to be tested:
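# ---------------------------------------------------------------------------------------------
# Illustration only (not part of the test suite): a minimal stack evaluator for the reversed-
# RPN token lists asserted above. The real signal_blend() additionally supports logical
# operators and n-ary functions such as 'max(3)'; _toy_rpn_eval is a hypothetical helper that
# only covers the four arithmetic operators, to show why e.g. ['+', '0', '/', '3', '-', '2', '1']
# applied to the signals [5, 9, 1, 4] yields (9 - 1) / 4 + 5 == 7.
# ---------------------------------------------------------------------------------------------
import operator
def _toy_rpn_eval(tokens, operands):
    # tokens are stored operator-first; reversing them gives ordinary postfix order,
    # which a plain stack can evaluate directly
    ops = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv}
    stack = []
    for tok in reversed(tokens):
        if tok in ops:
            right, left = stack.pop(), stack.pop()
            stack.append(ops[tok](left, right))
        else:
            stack.append(operands[int(tok)])
    return stack[0]
# _toy_rpn_eval(['+', '0', '/', '3', '-', '2', '1'], [5, 9, 1, 4]) -> 7.0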
def test_set_opt_par(self):
""" test setting opt pars in batch"""
print(f'--------- Testing setting Opt Pars: set_opt_par -------')
op = qt.Operator('dma, random, crossline')
op.set_parameter('dma',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.strategies[0].pars, (5, 10, 5))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
self.assertEqual(op.opt_tags, [1, 0, 0])
op.set_opt_par((5, 12, 9))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (35, 120, 10, 'buy'))
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 1])
op.set_opt_par((5, 12, 9, 8, 26, 9, 'buy'))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
op.set_opt_par((9, 200, 155, 8, 26, 9, 'buy', 5, 12, 9))
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# test set_opt_par when opt_tag is set to 2 (enumerated type of parameters)
op.set_parameter('crossline',
pars=(5, 10, 5, 'sell'),
opt_tag=2,
par_boes=((5, 10), (5, 15), (10, 15), ('buy', 'sell', 'none')),
window_length=10,
data_types=['close', 'open', 'high'])
self.assertEqual(op.opt_tags, [1, 0, 2])
self.assertEqual(op.strategies[0].pars, (9, 200, 155))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (5, 10, 5, 'sell'))
op.set_opt_par((5, 12, 9, (8, 26, 9, 'buy')))
self.assertEqual(op.strategies[0].pars, (5, 12, 9))
self.assertEqual(op.strategies[1].pars, (0.5,))
self.assertEqual(op.strategies[2].pars, (8, 26, 9, 'buy'))
# Test Errors
# Not enough values for parameter
op.set_parameter('crossline', opt_tag=1)
self.assertRaises(ValueError, op.set_opt_par, (5, 12, 9, 8))
# wrong type of input
self.assertRaises(AssertionError, op.set_opt_par, [5, 12, 9, 7, 15, 12, 'sell'])
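# ---------------------------------------------------------------------------------------------
# Illustration only: a hypothetical sketch of how a flat tuple handed to set_opt_par() could be
# distributed over the strategies (an assumption for readability, not qteasy's actual
# implementation). Strategies with opt_tag == 0 keep their current pars, opt_tag == 1 consumes
# par_count scalars from the tuple, and opt_tag == 2 consumes one pre-packed tuple, which is
# consistent with the assertions in test_set_opt_par above.
# ---------------------------------------------------------------------------------------------
def _toy_distribute_opt_pars(opt_pars, strategies):
    # strategies: list of (opt_tag, par_count, current_pars) triples
    result, k = [], 0
    for opt_tag, par_count, current in strategies:
        if opt_tag == 0:
            result.append(current)
        elif opt_tag == 1:
            result.append(tuple(opt_pars[k:k + par_count]))
            k += par_count
        else:  # opt_tag == 2: a single enumerated tuple
            result.append(tuple(opt_pars[k]))
            k += 1
    return result
# _toy_distribute_opt_pars((5, 12, 9, (8, 26, 9, 'buy')),
#                          [(1, 3, None), (0, 1, (0.5,)), (2, 4, None)])
# -> [(5, 12, 9), (0.5,), (8, 26, 9, 'buy')]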
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'R-TIMING'
self.stg_name = "CROSSLINE"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: Issue to be solved: the np.nan values are converted to 0 in the lsmask, which may have unexpected consequences
# TODO: the handling of nan values needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in proportion weight
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get max factor in even weight, with threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
def test_tokenizer(self):
self.assertListEqual(_exp_to_token('1+1'),
['1', '+', '1'])
print(_exp_to_token('1+1'))
self.assertListEqual(_exp_to_token('1 & 1'),
['1', '&', '1'])
print(_exp_to_token('1&1'))
self.assertListEqual(_exp_to_token('1 and 1'),
['1', 'and', '1'])
print(_exp_to_token('1 and 1'))
self.assertListEqual(_exp_to_token('1 or 1'),
['1', 'or', '1'])
print(_exp_to_token('1 or 1'))
self.assertListEqual(_exp_to_token('(1 - 1 + -1) * pi'),
['(', '1', '-', '1', '+', '-1', ')', '*', 'pi'])
print(_exp_to_token('(1 - 1 + -1) * pi'))
self.assertListEqual(_exp_to_token('abs(5-sqrt(2) / cos(pi))'),
['abs(', '5', '-', 'sqrt(', '2', ')', '/', 'cos(', 'pi', ')', ')'])
print(_exp_to_token('abs(5-sqrt(2) / cos(pi))'))
self.assertListEqual(_exp_to_token('sin(pi) + 2.14'),
['sin(', 'pi', ')', '+', '2.14'])
print(_exp_to_token('sin(pi) + 2.14'))
self.assertListEqual(_exp_to_token('(1-2)/3.0 + 0.0000'),
['(', '1', '-', '2', ')', '/', '3.0', '+', '0.0000'])
print(_exp_to_token('(1-2)/3.0 + 0.0000'))
self.assertListEqual(_exp_to_token('-(1. + .2) * max(1, 3, 5)'),
['-', '(', '1.', '+', '.2', ')', '*', 'max(', '1', ',', '3', ',', '5', ')'])
print(_exp_to_token('-(1. + .2) * max(1, 3, 5)'))
self.assertListEqual(_exp_to_token('(x + e * 10) / 10'),
['(', 'x', '+', 'e', '*', '10', ')', '/', '10'])
print(_exp_to_token('(x + e * 10) / 10'))
self.assertListEqual(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'),
['8.2', '/', '(', '(', '-.1', '+', 'abs3(', '3', ',', '4', ',', '5', ')', ')', '*', '0.12',
')'])
print(_exp_to_token('8.2/((-.1+abs3(3,4,5))*0.12)'))
self.assertListEqual(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'),
['8.2', '/', 'abs3(', '3', ',', '4', ',', '25.34', '+', '5', ')', '*', '0.12'])
print(_exp_to_token('8.2/abs3(3,4,25.34 + 5)*0.12'))
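# ---------------------------------------------------------------------------------------------
# Illustration only: a simplified regex tokenizer sketching the splitting behaviour tested
# above. It is a hypothetical helper, not the real _exp_to_token: it recognizes numbers,
# function openers such as 'max(' and single-character operators, but it does NOT merge a
# unary minus into the following literal (the real tokenizer returns '-1' and '-.1' as single
# tokens, as the assertions above show).
# ---------------------------------------------------------------------------------------------
import re
_TOY_TOKEN_PATTERN = re.compile(
    r'\d+\.\d*|\.\d+|\d+'  # numbers such as 25.34, 1., .2, 10
    r'|[A-Za-z_]\w*\('     # function openers such as max(, sqrt(, abs3(
    r'|[A-Za-z_]\w*'       # names and word operators such as pi, e, and, or
    r'|[&|+\-*/(),]'       # single-character operators and punctuation
)
def _toy_exp_to_token(expr):
    return _TOY_TOKEN_PATTERN.findall(expr)
# _toy_exp_to_token('sin(pi) + 2.14') -> ['sin(', 'pi', ')', '+', '2.14']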
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {Timestamp('2020-01-01 00:00:00', freq='D'): 0,
Timestamp('2020-01-02 00:00:00', freq='D'): 1,
Timestamp('2020-01-03 00:00:00', freq='D'): 2,
Timestamp('2020-01-04 00:00:00', freq='D'): 3,
Timestamp('2020-01-05 00:00:00', freq='D'): 4,
Timestamp('2020-01-06 00:00:00', freq='D'): 5,
Timestamp('2020-01-07 00:00:00', freq='D'): 6,
Timestamp('2020-01-08 00:00:00', freq='D'): 7,
Timestamp('2020-01-09 00:00:00', freq='D'): 8,
Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
# HistoryPanel should be empty if no value is given
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
# HistoryPanel should also be empty if an empty ndarray (one with a zero-length axis) is given
empty_hp = qt.HistoryPanel(np.empty((5, 0, 4)), levels=self.shares, columns=self.htypes)
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
print('test creating HistoryPanel with very limited data')
print('test creating HistoryPanel with 2D data')
temp_data = np.random.randint(10, size=(7, 3)).astype('float')
temp_hp = qt.HistoryPanel(temp_data)
# Error testing during HistoryPanel creation
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
# values is not np.ndarray
self.assertRaises(TypeError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
# label values not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
print('==========================\noutput all historical data of the close type\n')
self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
print(f'==========================\noutput all historical data of the close and open types\n')
self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
print(f'==========================\noutput all htypes of historical data for the first share\n')
self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
print('==========================\noutput the full historical data of all shares for htypes 0, 1 and 2\n')
self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all historical data of the close and high types\n')
self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all historical data of htypes 0 and 1\n')
self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
print('==========================\noutput all historical data of the close and high types\n')
self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all historical data of the three types from close to high\n')
self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput the full historical data of shares 0, 1 and 3\n')
self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
print('==========================\noutput all historical data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput the historical data of shares 0, 1 and 2\n', hp[:, 0: 3])
self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
print('==========================\noutput all historical data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput the historical data of days 0-7 for all shares\n')
self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
print('==========================\noutput the historical data of days 0-7 for share 000100\n')
self.assertTrue(np.allclose(hp[:, '000100', 0:8], data[0, 0:8, :], equal_nan=True))
print('==========================\nstart testing multi-axis slicing of HistoryPanel object')
print('==========================\noutput close and open data of shares 000100 and 000102\n',
hp['close,open', ['000100', '000102']])
print('==========================\noutput close and open data of shares 000100 and 000102\n',
hp['close,open', '000100, 000102'])
print(f'historyPanel: hp:\n{hp}')
print(f'data is:\n{data}')
hp.htypes = 'open,high,low,close'
hp.info()
hp.shares = ['000300', '600227', '600222', '000123', '000129']
hp.info()
def test_segment(self):
"""测试历史数据片段的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test segment with None parameters')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20150202')
seg3 = test_hp.segment(end_date='20201010')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp.values
))
self.assertTrue(np.allclose(
seg2.values, test_hp.values
))
self.assertTrue(np.allclose(
seg3.values, test_hp.values
))
# check that htypes and shares are the same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates)
self.assertEqual(seg3.hdates, test_hp.hdates)
print(f'Test segment with proper dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160704')
seg3 = test_hp.segment(start_date='2016-07-05',
end_date='20160708')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 2:6]
))
# check that htypes and shares are the same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[2:6])
print(f'Test segment with non-existing but in range dates')
seg1 = test_hp.segment()
seg2 = test_hp.segment('20160703')
seg3 = test_hp.segment(start_date='2016-07-03',
end_date='20160710')
self.assertIsInstance(seg1, qt.HistoryPanel)
self.assertIsInstance(seg2, qt.HistoryPanel)
self.assertIsInstance(seg3, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
self.assertTrue(np.allclose(
seg2.values, test_hp[:, :, 1:10]
))
self.assertTrue(np.allclose(
seg3.values, test_hp[:, :, 1:6]
))
# check that htypes and shares are the same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
self.assertEqual(seg2.htypes, test_hp.htypes)
self.assertEqual(seg2.shares, test_hp.shares)
self.assertEqual(seg3.htypes, test_hp.htypes)
self.assertEqual(seg3.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
self.assertEqual(seg2.hdates, test_hp.hdates[1:10])
self.assertEqual(seg3.hdates, test_hp.hdates[1:6])
print(f'Test segment with out-of-range dates')
seg1 = test_hp.segment(start_date='2016-05-03',
end_date='20160910')
self.assertIsInstance(seg1, qt.HistoryPanel)
# check values
self.assertTrue(np.allclose(
seg1.values, test_hp[:, :, :]
))
# check that htypes and shares are the same
self.assertEqual(seg1.htypes, test_hp.htypes)
self.assertEqual(seg1.shares, test_hp.shares)
# check that hdates are the same
self.assertEqual(seg1.hdates, test_hp.hdates)
def test_slice(self):
"""测试历史数据切片的获取"""
test_hp = qt.HistoryPanel(self.data,
levels=self.shares,
columns=self.htypes,
rows=self.index2)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
print(f'Test slice with shares')
share = '000101'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101']))
share = '000101, 000103'
slc = test_hp.slice(shares=share)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000101', '000103'])
self.assertEqual(slc.htypes, test_hp.htypes)
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp[:, '000101, 000103']))
print(f'Test slice with htypes')
htype = 'open'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open']))
htype = 'open, close'
slc = test_hp.slice(htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, test_hp.shares)
self.assertEqual(slc.htypes, ['open', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['open, close']))
# test that slicing of "open, close" does NOT equal to "close, open"
self.assertFalse(np.allclose(slc.values, test_hp['close, open']))
print(f'Test slicing with both htypes and shares')
share = '000103, 000101'
htype = 'high, low, close'
slc = test_hp.slice(shares=share, htypes=htype)
self.assertIsInstance(slc, qt.HistoryPanel)
self.assertEqual(slc.shares, ['000103', '000101'])
self.assertEqual(slc.htypes, ['high', 'low', 'close'])
self.assertEqual(slc.hdates, test_hp.hdates)
self.assertTrue(np.allclose(slc.values, test_hp['high, low, close', '000103, 000101']))
print(f'Test Error cases')
# duplicated input
htype = 'open, close, open'
self.assertRaises(AssertionError, test_hp.slice, htypes=htype)
def test_relabel(self):
new_shares_list = ['000001', '000002', '000003', '000004', '000005']
new_shares_str = '000001, 000002, 000003, 000004, 000005'
new_htypes_list = ['close', 'volume', 'value', 'exchange']
new_htypes_str = 'close, volume, value, exchange'
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_list)
print(temp_hp.info())
print(temp_hp.htypes)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_list)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
print(f'test errors raising')
temp_hp = self.hp.copy()
self.assertRaises(AssertionError, temp_hp.re_label, htypes=new_shares_str)
self.assertRaises(TypeError, temp_hp.re_label, htypes=123)
self.assertRaises(AssertionError, temp_hp.re_label, htypes='wrong input!')
def test_csv_to_hp(self):
pass
def test_hdf_to_hp(self):
pass
def test_hp_join(self):
# TODO: this test needs to be strengthened: use concrete examples to confirm that the result of hp_join is correct,
# TODO: especially with different shares, htypes and hdates, and whether they can be combined
# TODO: correctly when they are given in different orders
print(f'join two simple HistoryPanels with same shares')
temp_hp = self.hp.join(self.hp2, same_shares=True)
self.assertIsInstance(temp_hp, qt.HistoryPanel)
def test_df_to_hp(self):
print(f'test converting DataFrame to HistoryPanel')
data = np.random.randint(10, size=(10, 5))
df1 = pd.DataFrame(data)
df2 = pd.DataFrame(data, columns=str_to_list(self.shares))
df3 = pd.DataFrame(data[:, 0:4])
df4 = pd.DataFrame(data[:, 0:4], columns=str_to_list(self.htypes))
hp = qt.dataframe_to_hp(df1, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, [0, 1, 2, 3, 4])
self.assertEqual(hp.htypes, ['close'])
self.assertEqual(hp.hdates, [pd.Timestamp('1970-01-01 00:00:00'),
pd.Timestamp('1970-01-01 00:00:00.000000001'),
pd.Timestamp('1970-01-01 00:00:00.000000002'),
pd.Timestamp('1970-01-01 00:00:00.000000003'),
pd.Timestamp('1970-01-01 00:00:00.000000004'),
pd.Timestamp('1970-01-01 00:00:00.000000005'),
pd.Timestamp('1970-01-01 00:00:00.000000006'),
pd.Timestamp('1970-01-01 00:00:00.000000007'),
pd.Timestamp('1970-01-01 00:00:00.000000008'),
pd.Timestamp('1970-01-01 00:00:00.000000009')])
hp = qt.dataframe_to_hp(df2, shares=self.shares, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, str_to_list(self.shares))
self.assertEqual(hp.htypes, ['close'])
hp = qt.dataframe_to_hp(df3, shares='000100', column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, [0, 1, 2, 3])
hp = qt.dataframe_to_hp(df4, shares='000100', htypes=self.htypes, column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, str_to_list(self.htypes))
hp.info()
self.assertRaises(KeyError, qt.dataframe_to_hp, df1)
def test_to_dataframe(self):
""" 测试HistoryPanel对象的to_dataframe方法
"""
print(f'START TEST == test_to_dataframe')
print(f'test converting test hp to dataframe with share == "000102":')
df_test = self.hp.to_dataframe(share='000102')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000102'], values))
print(f'test DataFrame conversion with share == "000100"')
df_test = self.hp.to_dataframe(share='000100')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000100'], values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, share=3.0)
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, share='000300')
print(f'test DataFrame conversion with htype == "close"')
df_test = self.hp.to_dataframe(htype='close')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['close'].T, values))
print(f'test DataFrame conversion with htype == "high"')
df_test = self.hp.to_dataframe(htype='high')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['high'].T, values))
print(f'test DataFrame conversion with htype == "high" and dropna')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[4:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values))].reshape(9, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion with htype == "high", dropna and treat infs as na')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True, inf_as_na=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[5:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values) & ~np.isinf(target_values))].reshape(8, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, htype=pd.DataFrame())
print(f'test DataFrame error raising with htype not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, htype='non_type')
print(f'Raises KeyError when both or neither parameter is given')
self.assertRaises(KeyError, self.hp.to_dataframe)
self.assertRaises(KeyError, self.hp.to_dataframe, share='000100', htype='close')
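# ---------------------------------------------------------------------------------------------
# For reference, a small sketch under the assumption that inf_as_na simply treats +/-inf like
# NaN before rows are dropped; this reproduces why rows 3 and 4 disappear from the expected
# index in the dropna/inf_as_na assertions above. _demo_col and the derived names are
# hypothetical, for illustration only.
# ---------------------------------------------------------------------------------------------
import numpy as np
import pandas as pd
_demo_col = pd.DataFrame({'a': [1.0, np.nan, np.inf, 4.0]})
_only_nan_dropped = _demo_col.dropna()                                         # keeps the +inf row
_nan_and_inf_dropped = _demo_col.replace([np.inf, -np.inf], np.nan).dropna()   # drops NaN and inf rows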
def test_to_df_dict(self):
"""测试HistoryPanel公有方法to_df_dict"""
print('test convert history panel slice by share')
df_dict = self.hp.to_df_dict('share')
self.assertEqual(self.hp.shares, list(df_dict.keys()))
df_dict = self.hp.to_df_dict()
self.assertEqual(self.hp.shares, list(df_dict.keys()))
print('test convert history panel slice by htype')
df_dict = self.hp.to_df_dict('htype')
self.assertEqual(self.hp.htypes, list(df_dict.keys()))
print('test raise assertion error')
self.assertRaises(AssertionError, self.hp.to_df_dict, by='random text')
self.assertRaises(AssertionError, self.hp.to_df_dict, by=3)
print('test empty hp')
df_dict = qt.HistoryPanel().to_df_dict('share')
self.assertEqual(df_dict, {})
def test_stack_dataframes(self):
print('test stack dataframes in a list')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares=['000100', '000200', '000300'])
hp2 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000100', '000200', '000300'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes=['close', 'high', 'low'])
hp4 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
print('test stack dataframes in a dict')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares')
hp2 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000001.SZ', '000002.SZ', '000003.SZ'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes(dfs={'close': df1, 'high': df2, 'low': df3},
stack_along='htypes')
hp4 = stack_dataframes(dfs={'close': df1, 'low': df2, 'high': df3},
stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
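# ---------------------------------------------------------------------------------------------
# Illustration only: a minimal sketch (an assumption about the semantics, not qteasy's actual
# implementation) of what stacking DataFrames along the 'shares' axis amounts to: align every
# frame on the union of their rows and columns, then stack the aligned 2-D blocks into one
# 3-D array of shape (share, hdate, htype), leaving NaN wherever a frame had no data. This is
# how the values1/values2 reference arrays above can be derived by hand.
# ---------------------------------------------------------------------------------------------
import numpy as np
import pandas as pd
def _toy_stack_along_shares(frames):
    all_rows = sorted(set().union(*(f.index for f in frames)))
    all_cols = sorted(set().union(*(f.columns for f in frames)))
    aligned = [f.reindex(index=all_rows, columns=all_cols) for f in frames]
    return np.stack([f.values for f in aligned], axis=0)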
def test_to_csv(self):
pass
def test_to_hdf(self):
pass
def test_fill_na(self):
"""测试填充无效值"""
print(self.hp)
new_values = self.hp.values.astype(float)
new_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = np.nan
print(new_values)
temp_hp = qt.HistoryPanel(values=new_values, levels=self.hp.levels, rows=self.hp.rows, columns=self.hp.columns)
self.assertTrue(np.allclose(temp_hp.values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]], np.nan, equal_nan=True))
temp_hp.fillna(2.3)
filled_values = new_values.copy()
filled_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = 2.3
self.assertTrue(np.allclose(temp_hp.values,
filled_values, equal_nan=True))
def test_fill_inf(self):
"""测试填充无限值"""
def test_get_history_panel(self):
# TODO: implement this test case
# test get only one line of data
pass
def test_get_price_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20200101'
end = '20200131'
htypes = 'open, high, low, close'
target_price_000039 = [[9.45, 9.49, 9.12, 9.17],
[9.46, 9.56, 9.4, 9.5],
[9.7, 9.76, 9.5, 9.51],
[9.7, 9.75, 9.7, 9.72],
[9.73, 9.77, 9.7, 9.73],
[9.83, 9.85, 9.71, 9.72],
[9.85, 9.85, 9.75, 9.79],
[9.96, 9.96, 9.83, 9.86],
[9.87, 9.94, 9.77, 9.93],
[9.82, 9.9, 9.76, 9.87],
[9.8, 9.85, 9.77, 9.82],
[9.84, 9.86, 9.71, 9.72],
[9.83, 9.93, 9.81, 9.86],
[9.7, 9.87, 9.7, 9.82],
[9.83, 9.86, 9.69, 9.79],
[9.8, 9.94, 9.8, 9.86]]
target_price_600748 = [[5.68, 5.68, 5.32, 5.37],
[5.62, 5.68, 5.46, 5.65],
[5.72, 5.72, 5.61, 5.62],
[5.76, 5.77, 5.6, 5.73],
[5.78, 5.84, 5.73, 5.75],
[5.89, 5.91, 5.76, 5.77],
[6.03, 6.04, 5.87, 5.89],
[5.94, 6.07, 5.94, 6.02],
[5.96, 5.98, 5.88, 5.97],
[6.04, 6.06, 5.95, 5.96],
[5.98, 6.04, 5.96, 6.03],
[6.1, 6.11, 5.89, 5.94],
[6.02, 6.12, 6., 6.1],
[5.96, 6.05, 5.88, 6.01],
[6.03, 6.03, 5.95, 5.99],
[6.02, 6.12, 5.99, 5.99]]
target_price_000040 = [[3.63, 3.83, 3.63, 3.65],
[3.99, 4.07, 3.97, 4.03],
[4.1, 4.11, 3.93, 3.95],
[4.12, 4.13, 4.06, 4.11],
[4.13, 4.19, 4.07, 4.13],
[4.27, 4.28, 4.11, 4.12],
[4.37, 4.38, 4.25, 4.29],
[4.34, 4.5, 4.32, 4.41],
[4.28, 4.35, 4.2, 4.34],
[4.41, 4.43, 4.29, 4.31],
[4.42, 4.45, 4.36, 4.41],
[4.51, 4.56, 4.33, 4.35],
[4.35, 4.55, 4.31, 4.55],
[4.3, 4.41, 4.22, 4.36],
[4.27, 4.44, 4.23, 4.34],
[4.23, 4.27, 4.18, 4.25]]
print(f'test get price type raw data with a single thread')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d')
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
print(f'test get price type raw data with multi threads')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d', parallel=10)
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
def test_get_financial_report_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20160101'
end = '20201231'
htypes = 'eps,basic_eps,diluted_eps,total_revenue,revenue,total_share,' \
'cap_rese,undistr_porfit,surplus_rese,net_profit'
target_eps_000039 = [[1.41],
[0.1398],
[-0.0841],
[-0.1929],
[0.37],
[0.1357],
[0.1618],
[0.1191],
[1.11],
[0.759],
[0.3061],
[0.1409],
[0.81],
[0.4187],
[0.2554],
[0.1624],
[0.14],
[-0.0898],
[-0.1444],
[0.1291]]
target_eps_600748 = [[0.41],
[0.22],
[0.22],
[0.09],
[0.42],
[0.23],
[0.22],
[0.09],
[0.36],
[0.16],
[0.15],
[0.07],
[0.47],
[0.19],
[0.12],
[0.07],
[0.32],
[0.22],
[0.14],
[0.07]]
target_eps_000040 = [[-0.6866],
[-0.134],
[-0.189],
[-0.036],
[-0.6435],
[0.05],
[0.062],
[0.0125],
[0.8282],
[1.05],
[0.985],
[0.811],
[0.41],
[0.242],
[0.113],
[0.027],
[0.19],
[0.17],
[0.17],
[0.064]]
target_basic_eps_000039 = [[1.3980000e-01, 1.3980000e-01, 6.3591954e+10, 6.3591954e+10],
[-8.4100000e-02, -8.4100000e-02, 3.9431807e+10, 3.9431807e+10],
[-1.9290000e-01, -1.9290000e-01, 1.5852177e+10, 1.5852177e+10],
[3.7000000e-01, 3.7000000e-01, 8.5815341e+10, 8.5815341e+10],
[1.3570000e-01, 1.3430000e-01, 6.1660271e+10, 6.1660271e+10],
[1.6180000e-01, 1.6040000e-01, 4.2717729e+10, 4.2717729e+10],
[1.1910000e-01, 1.1900000e-01, 1.9099547e+10, 1.9099547e+10],
[1.1100000e+00, 1.1000000e+00, 9.3497622e+10, 9.3497622e+10],
[7.5900000e-01, 7.5610000e-01, 6.6906147e+10, 6.6906147e+10],
[3.0610000e-01, 3.0380000e-01, 4.3560398e+10, 4.3560398e+10],
[1.4090000e-01, 1.4050000e-01, 1.9253639e+10, 1.9253639e+10],
[8.1000000e-01, 8.1000000e-01, 7.6299930e+10, 7.6299930e+10],
[4.1870000e-01, 4.1710000e-01, 5.3962706e+10, 5.3962706e+10],
[2.5540000e-01, 2.5440000e-01, 3.3387152e+10, 3.3387152e+10],
[1.6240000e-01, 1.6200000e-01, 1.4675987e+10, 1.4675987e+10],
[1.4000000e-01, 1.4000000e-01, 5.1111652e+10, 5.1111652e+10],
[-8.9800000e-02, -8.9800000e-02, 3.4982614e+10, 3.4982614e+10],
[-1.4440000e-01, -1.4440000e-01, 2.3542843e+10, 2.3542843e+10],
[1.2910000e-01, 1.2860000e-01, 1.0412416e+10, 1.0412416e+10],
[7.2000000e-01, 7.1000000e-01, 5.8685804e+10, 5.8685804e+10]]
target_basic_eps_600748 = [[2.20000000e-01, 2.20000000e-01, 5.29423397e+09, 5.29423397e+09],
[2.20000000e-01, 2.20000000e-01, 4.49275653e+09, 4.49275653e+09],
[9.00000000e-02, 9.00000000e-02, 1.59067065e+09, 1.59067065e+09],
[4.20000000e-01, 4.20000000e-01, 8.86555586e+09, 8.86555586e+09],
[2.30000000e-01, 2.30000000e-01, 5.44850143e+09, 5.44850143e+09],
[2.20000000e-01, 2.20000000e-01, 4.34978927e+09, 4.34978927e+09],
[9.00000000e-02, 9.00000000e-02, 1.73793793e+09, 1.73793793e+09],
[3.60000000e-01, 3.60000000e-01, 8.66375241e+09, 8.66375241e+09],
[1.60000000e-01, 1.60000000e-01, 4.72875116e+09, 4.72875116e+09],
[1.50000000e-01, 1.50000000e-01, 3.76879016e+09, 3.76879016e+09],
[7.00000000e-02, 7.00000000e-02, 1.31785454e+09, 1.31785454e+09],
[4.70000000e-01, 4.70000000e-01, 7.23391685e+09, 7.23391685e+09],
[1.90000000e-01, 1.90000000e-01, 3.76072215e+09, 3.76072215e+09],
[1.20000000e-01, 1.20000000e-01, 2.35845364e+09, 2.35845364e+09],
[7.00000000e-02, 7.00000000e-02, 1.03831865e+09, 1.03831865e+09],
[3.20000000e-01, 3.20000000e-01, 6.48880919e+09, 6.48880919e+09],
[2.20000000e-01, 2.20000000e-01, 3.72209142e+09, 3.72209142e+09],
[1.40000000e-01, 1.40000000e-01, 2.22563924e+09, 2.22563924e+09],
[7.00000000e-02, 7.00000000e-02, 8.96647052e+08, 8.96647052e+08],
[4.80000000e-01, 4.80000000e-01, 6.61917508e+09, 6.61917508e+09]]
target_basic_eps_000040 = [[-1.34000000e-01, -1.34000000e-01, 2.50438755e+09, 2.50438755e+09],
[-1.89000000e-01, -1.89000000e-01, 1.32692347e+09, 1.32692347e+09],
[-3.60000000e-02, -3.60000000e-02, 5.59073338e+08, 5.59073338e+08],
[-6.43700000e-01, -6.43700000e-01, 6.80576162e+09, 6.80576162e+09],
[5.00000000e-02, 5.00000000e-02, 6.38891620e+09, 6.38891620e+09],
[6.20000000e-02, 6.20000000e-02, 5.23267082e+09, 5.23267082e+09],
[1.25000000e-02, 1.25000000e-02, 2.22420874e+09, 2.22420874e+09],
[8.30000000e-01, 8.30000000e-01, 8.67628947e+09, 8.67628947e+09],
[1.05000000e+00, 1.05000000e+00, 5.29431716e+09, 5.29431716e+09],
[9.85000000e-01, 9.85000000e-01, 3.56822382e+09, 3.56822382e+09],
[8.11000000e-01, 8.11000000e-01, 1.06613439e+09, 1.06613439e+09],
[4.10000000e-01, 4.10000000e-01, 8.13102532e+09, 8.13102532e+09],
[2.42000000e-01, 2.42000000e-01, 5.17971521e+09, 5.17971521e+09],
[1.13000000e-01, 1.13000000e-01, 3.21704120e+09, 3.21704120e+09],
[2.70000000e-02, 2.70000000e-02, 8.41966738e+08, 8.24272235e+08],
[1.90000000e-01, 1.90000000e-01, 3.77350171e+09, 3.77350171e+09],
[1.70000000e-01, 1.70000000e-01, 2.38643892e+09, 2.38643892e+09],
[1.70000000e-01, 1.70000000e-01, 1.29127117e+09, 1.29127117e+09],
[6.40000000e-02, 6.40000000e-02, 6.03256858e+08, 6.03256858e+08],
[1.30000000e-01, 1.30000000e-01, 1.66572918e+09, 1.66572918e+09]]
target_total_share_000039 = [[3.5950140e+09, 4.8005360e+09, 2.1573660e+10, 3.5823430e+09],
[3.5860750e+09, 4.8402300e+09, 2.0750827e+10, 3.5823430e+09],
[3.5860750e+09, 4.9053550e+09, 2.0791307e+10, 3.5823430e+09],
[3.5845040e+09, 4.8813110e+09, 2.1482857e+10, 3.5823430e+09],
[3.5831490e+09, 4.9764250e+09, 2.0926816e+10, 3.2825850e+09],
[3.5825310e+09, 4.8501270e+09, 2.1020418e+10, 3.2825850e+09],
[2.9851110e+09, 5.4241420e+09, 2.2438350e+10, 3.2825850e+09],
[2.9849890e+09, 4.1284000e+09, 2.2082769e+10, 3.2825850e+09],
[2.9849610e+09, 4.0838010e+09, 2.1045994e+10, 3.2815350e+09],
[2.9849560e+09, 4.2491510e+09, 1.9694345e+10, 3.2815350e+09],
[2.9846970e+09, 4.2351600e+09, 2.0016361e+10, 3.2815350e+09],
[2.9828890e+09, 4.2096630e+09, 1.9734494e+10, 3.2815350e+09],
[2.9813960e+09, 3.4564240e+09, 1.8562738e+10, 3.2793790e+09],
[2.9803530e+09, 3.0759650e+09, 1.8076208e+10, 3.2793790e+09],
[2.9792680e+09, 3.1376690e+09, 1.7994776e+10, 3.2793790e+09],
[2.9785770e+09, 3.1265850e+09, 1.7495053e+10, 3.2793790e+09],
[2.9783640e+09, 3.1343850e+09, 1.6740840e+10, 3.2035780e+09],
[2.9783590e+09, 3.1273880e+09, 1.6578389e+10, 3.2035780e+09],
[2.9782780e+09, 3.1169280e+09, 1.8047639e+10, 3.2035780e+09],
[2.9778200e+09, 3.1818630e+09, 1.7663145e+10, 3.2035780e+09]]
target_total_share_600748 = [[1.84456289e+09, 2.60058426e+09, 5.72443733e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.72096899e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.65738237e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.50257806e+09, 4.58026529e+08],
[1.84456289e+09, 2.59868164e+09, 5.16741523e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 5.14677280e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.94955591e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.79001451e+09, 4.44998882e+08],
[1.84456289e+09, 3.11401684e+09, 4.46326988e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.45419136e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.39652948e+09, 4.01064256e+08],
[1.84456289e+09, 3.18007783e+09, 4.26608403e+09, 4.01064256e+08],
[1.84456289e+09, 3.10935622e+09, 3.78417688e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.65806574e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.62063090e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.50063915e+09, 3.65651701e+08],
[1.41889453e+09, 3.55940850e+09, 3.22272993e+09, 3.62124939e+08],
[1.41889453e+09, 3.56129650e+09, 3.11477476e+09, 3.62124939e+08],
[1.41889453e+09, 3.59632888e+09, 3.06836903e+09, 3.62124939e+08],
[1.08337087e+09, 3.37400726e+07, 3.00918704e+09, 3.62124939e+08]]
target_total_share_000040 = [[1.48687387e+09, 1.06757900e+10, 8.31900755e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757900e+10, 7.50177302e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757899e+10, 9.90255974e+08, 2.16123282e+08],
[1.48687387e+09, 1.06757899e+10, 1.03109866e+09, 2.16091994e+08],
[1.48687387e+09, 1.06757910e+10, 2.07704745e+09, 2.16123282e+08],
[1.48687387e+09, 1.06757910e+10, 2.09608665e+09, 2.16123282e+08],
[1.48687387e+09, 1.06803833e+10, 2.13354083e+09, 2.16123282e+08],
[1.48687387e+09, 1.06804090e+10, 2.11489364e+09, 2.16123282e+08],
[1.33717327e+09, 8.87361727e+09, 2.42939924e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.34220254e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.16390368e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 1.07961915e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 8.58866066e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 6.87024393e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.71554565e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.54241222e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 5.10059576e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 4.59351639e+08, 1.88489589e+08],
[4.69593364e+08, 2.78355875e+08, 4.13430814e+08, 1.88489589e+08],
[4.69593364e+08, 2.74235459e+08, 3.83557678e+08, 1.88489589e+08]]
target_net_profit_000039 = [[np.nan],
[2.422180e+08],
[np.nan],
[2.510113e+09],
[np.nan],
[1.102220e+09],
[np.nan],
[4.068455e+09],
[np.nan],
[1.315957e+09],
[np.nan],
[3.158415e+09],
[np.nan],
[1.066509e+09],
[np.nan],
[7.349830e+08],
[np.nan],
[-5.411600e+08],
[np.nan],
[2.271961e+09]]
target_net_profit_600748 = [[np.nan],
[4.54341757e+08],
[np.nan],
[9.14476670e+08],
[np.nan],
[5.25360283e+08],
[np.nan],
[9.24502415e+08],
[np.nan],
[4.66560302e+08],
[np.nan],
[9.15265285e+08],
[np.nan],
[2.14639674e+08],
[np.nan],
[7.45093049e+08],
[np.nan],
[2.10967312e+08],
[np.nan],
[6.04572711e+08]]
target_net_profit_000040 = [[np.nan],
[-2.82458846e+08],
[np.nan],
[-9.57130872e+08],
[np.nan],
[9.22114527e+07],
[np.nan],
[1.12643819e+09],
[np.nan],
[1.31715269e+09],
[np.nan],
[5.39940093e+08],
[np.nan],
[1.51440838e+08],
[np.nan],
[1.75339071e+08],
[np.nan],
[8.04740415e+07],
[np.nan],
[6.20445815e+07]]
print('test get financial data, in multi thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=4)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# Check that all returned items have the correct data type
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# Check whether any of the returned data is empty
print(all(item.empty for subdict in df_list for item in subdict.values()))
# Check that each retrieved data group is correct and consistently ordered; skip the check if empty data was returned
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
print('test get financial data, in single thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=0)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# Check that all returned items have the correct data type
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# Check for empty data; network issues can occasionally return empty data
self.assertFalse(all(item.empty for subdict in df_list for item in subdict.values()))
# Check that each retrieved data group is correct and consistently ordered; skip the check if empty data was returned
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
def test_get_composite_type_raw_data(self):
pass
class TestUtilityFuncs(unittest.TestCase):
def setUp(self):
pass
def test_time_string_format(self):
print('Testing qt.time_string_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_str_to_list(self):
self.assertEqual(str_to_list('a,b,c,d,e'), ['a', 'b', 'c', 'd', 'e'])
self.assertEqual(str_to_list('a, b, c '), ['a', 'b', 'c'])
self.assertEqual(str_to_list('a, b: c', sep_char=':'), ['a,b', 'c'])
self.assertEqual(str_to_list('abc'), ['abc'])
self.assertEqual(str_to_list(''), [])
self.assertRaises(AssertionError, str_to_list, 123)
def test_list_or_slice(self):
str_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
self.assertEqual(list_or_slice(slice(1, 2, 1), str_dict), slice(1, 2, 1))
self.assertEqual(list_or_slice('open', str_dict), [1])
self.assertEqual(list(list_or_slice('close, high, low', str_dict)), [0, 2, 3])
self.assertEqual(list(list_or_slice('close:high', str_dict)), [0, 1, 2])
self.assertEqual(list(list_or_slice(['open'], str_dict)), [1])
self.assertEqual(list(list_or_slice(['open', 'high'], str_dict)), [1, 2])
self.assertEqual(list(list_or_slice(0, str_dict)), [0])
self.assertEqual(list(list_or_slice([0, 2], str_dict)), [0, 2])
self.assertEqual(list(list_or_slice([True, False, True, False], str_dict)), [0, 2])
def test_labels_to_dict(self):
target_list = [0, 1, 10, 100]
target_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
target_dict2 = {'close': 0, 'open': 2, 'high': 1, 'low': 3}
self.assertEqual(labels_to_dict('close, open, high, low', target_list), target_dict)
self.assertEqual(labels_to_dict(['close', 'open', 'high', 'low'], target_list), target_dict)
self.assertEqual(labels_to_dict('close, high, open, low', target_list), target_dict2)
self.assertEqual(labels_to_dict(['close', 'high', 'open', 'low'], target_list), target_dict2)
def test_input_to_list(self):
""" test util function input_to_list()"""
self.assertEqual(input_to_list(5, 3), [5, 5, 5])
self.assertEqual(input_to_list(5, 3, 0), [5, 5, 5])
self.assertEqual(input_to_list([5], 3, 0), [5, 0, 0])
self.assertEqual(input_to_list([5, 4], 3, 0), [5, 4, 0])
def test_regulate_date_format(self):
self.assertEqual(regulate_date_format('2019/11/06'), '20191106')
self.assertEqual(regulate_date_format('2019-11-06'), '20191106')
self.assertEqual(regulate_date_format('20191106'), '20191106')
self.assertEqual(regulate_date_format('191106'), '20061119')
self.assertEqual(regulate_date_format('830522'), '19830522')
self.assertEqual(regulate_date_format(datetime.datetime(2010, 3, 15)), '20100315')
self.assertEqual(regulate_date_format(pd.Timestamp('2010.03.15')), '20100315')
self.assertRaises(ValueError, regulate_date_format, 'abc')
self.assertRaises(ValueError, regulate_date_format, '2019/13/43')
def test_list_to_str_format(self):
self.assertEqual(list_to_str_format(['close', 'open', 'high', 'low']),
'close,open,high,low')
self.assertEqual(list_to_str_format(['letters', ' ', '123 4', 123, ' kk l']),
'letters,,1234,kkl')
self.assertEqual(list_to_str_format('a string input'),
'a,string,input')
self.assertEqual(list_to_str_format('already,a,good,string'),
'already,a,good,string')
self.assertRaises(AssertionError, list_to_str_format, 123)
def test_is_trade_day(self):
"""test if the funcion maybe_trade_day() and is_market_trade_day() works properly
"""
date_trade = '20210401'
date_holiday = '20210102'
date_weekend = '20210424'
date_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
self.assertTrue(maybe_trade_day(date_seems_trade_day))
self.assertTrue(maybe_trade_day(date_too_early))
self.assertTrue(maybe_trade_day(date_too_late))
self.assertTrue(maybe_trade_day(date_christmas))
self.assertTrue(is_market_trade_day(date_trade))
self.assertFalse(is_market_trade_day(date_holiday))
self.assertFalse(is_market_trade_day(date_weekend))
self.assertFalse(is_market_trade_day(date_seems_trade_day))
self.assertFalse(is_market_trade_day(date_too_early))
self.assertFalse(is_market_trade_day(date_too_late))
self.assertTrue(is_market_trade_day(date_christmas))
self.assertFalse(is_market_trade_day(date_christmas, exchange='XHKG'))
date_trade = pd.to_datetime('20210401')
date_holiday = pd.to_datetime('20210102')
date_weekend = pd.to_datetime('20210424')
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
def test_weekday_name(self):
""" test util func weekday_name()"""
self.assertEqual(weekday_name(0), 'Monday')
self.assertEqual(weekday_name(1), 'Tuesday')
self.assertEqual(weekday_name(2), 'Wednesday')
self.assertEqual(weekday_name(3), 'Thursday')
self.assertEqual(weekday_name(4), 'Friday')
self.assertEqual(weekday_name(5), 'Saturday')
self.assertEqual(weekday_name(6), 'Sunday')
def test_list_truncate(self):
""" test util func list_truncate()"""
l = [1,2,3,4,5]
ls = list_truncate(l, 2)
self.assertEqual(ls[0], [1, 2])
self.assertEqual(ls[1], [3, 4])
self.assertEqual(ls[2], [5])
self.assertRaises(AssertionError, list_truncate, l, 0)
self.assertRaises(AssertionError, list_truncate, 12, 0)
self.assertRaises(AssertionError, list_truncate, 0, l)
def test_maybe_trade_day(self):
""" test util function maybe_trade_day()"""
self.assertTrue(maybe_trade_day('20220104'))
self.assertTrue(maybe_trade_day('2021-12-31'))
self.assertTrue(maybe_trade_day(pd.to_datetime('2020/03/06')))
self.assertFalse(maybe_trade_day('2020-01-01'))
self.assertFalse(maybe_trade_day('2020/10/06'))
self.assertRaises(TypeError, maybe_trade_day, 'aaa')
def test_prev_trade_day(self):
"""test the function prev_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
date_weekend = '20210424'
prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
date_seems_trade_day = '20210217'
prev_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertEqual(pd.to_datetime(prev_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(prev_trade_day(date_holiday)),
pd.to_datetime(prev_holiday))
self.assertEqual(pd.to_datetime(prev_trade_day(date_weekend)),
pd.to_datetime(prev_weekend))
self.assertEqual(pd.to_datetime(prev_trade_day(date_seems_trade_day)),
pd.to_datetime(prev_seems_trade_day))
self.assertEqual(pd.to_datetime(prev_trade_day(date_too_early)),
pd.to_datetime(date_too_early))
self.assertEqual(pd.to_datetime(prev_trade_day(date_too_late)),
pd.to_datetime(date_too_late))
self.assertEqual(pd.to_datetime(prev_trade_day(date_christmas)),
pd.to_datetime(date_christmas))
def test_next_trade_day(self):
""" test the function next_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
next_holiday = pd.to_datetime(date_holiday) + pd.Timedelta(2, 'd')
date_weekend = '20210424'
next_weekend = pd.to_datetime(date_weekend) + pd.Timedelta(2, 'd')
date_seems_trade_day = '20210217'
next_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertEqual(pd.to_datetime(next_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(next_trade_day(date_holiday)),
pd.to_datetime(next_holiday))
self.assertEqual(pd.to_datetime(next_trade_day(date_weekend)),
pd.to_datetime(next_weekend))
self.assertEqual(pd.to_datetime(next_trade_day(date_seems_trade_day)),
pd.to_datetime(next_seems_trade_day))
self.assertEqual(pd.to_datetime(next_trade_day(date_too_early)),
pd.to_datetime(date_too_early))
self.assertEqual(pd.to_datetime(next_trade_day(date_too_late)),
pd.to_datetime(date_too_late))
self.assertEqual(pd.to_datetime(next_trade_day(date_christmas)),
pd.to_datetime(date_christmas))
def test_prev_market_trade_day(self):
""" test the function prev_market_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
date_weekend = '20210424'
prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
date_seems_trade_day = '20210217'
prev_seems_trade_day = pd.to_datetime(date_seems_trade_day) - pd.Timedelta(7, 'd')
import requests
import zipfile
import io
import pandas as pd
import datetime
from def_market_off import market_off
from def_date_string import date_string
import math
import mibian
from def_expiry import expiry
import numpy as np
# from kaleido.scopes.plotly import PlotlyScope # pip install kaleido (another method to save plotly html graph as png)
from scipy.stats import norm # needed for norm error in windows
path = 'C:/Users/alex1/PycharmProjects/bhav/'
def bhav_options(dayback=-1, root=path):  # dayback: -1 = yesterday (default), 0 = today, -2 = day before yesterday
try:
market_is_off = market_off(dayback,root)[0]
"""
URL to save the bhavcopy and other options-related data such as IV, OI, etc.
"""
"""
Get expiry date
Generating the expiry date was the trickiest part. The logic: first get the current month,
then find the last Thursday of that month, shift one day earlier if that Thursday is a
holiday, and roll over to the next month if the current date is already past the expiry.
"""
datenow = datetime.date.today() + datetime.timedelta(dayback)
year = datenow.strftime('%Y')
month_num = datenow.strftime('%m')
expiry_list = expiry(year =int(year),root=root)
exp_day = expiry_list[int(month_num)]
exp_int1 = datetime.datetime.strptime(exp_day, '%d-%m-%Y').date()
exp_int = int(exp_int1.strftime('%Y%m%d'))
now_int = int(datenow.strftime('%Y%m%d'))
"if current date < expiry"
if now_int < exp_int:
exp_day_format = datetime.datetime.strptime(exp_day, '%d-%m-%Y').date()
exp_month = exp_day_format.strftime('%b')
expirydate = exp_day_format.strftime('%d-' + exp_month + '-%Y')
print('expiry date ', expirydate)
else:
exp_day = expiry_list[int(month_num) + 1]
exp_day_format = datetime.datetime.strptime(exp_day, '%d-%m-%Y').date()
exp_month = exp_day_format.strftime('%b')
expirydate = exp_day_format.strftime('%d-' + exp_month + '-%Y')
print('expiry date rolled for next month', expirydate)
exp_int_new = datetime.datetime.strptime(exp_day, '%d-%m-%Y').date()
exp_int_new = int(exp_int_new.strftime('%Y%m%d'))
dt_1 = datetime.date.today() + datetime.timedelta(dayback) # -1 for yday and 0 for today
"""
It first checks whether the selected bhavcopy date is a holiday or weekend, which is
useful when running this code in a loop to download several historical bhavcopies.
The bhavcopy URL is dynamic, i.e. it changes with month and date, so the constant part
of the URL is kept as-is and the dynamic part is built with the datetime library; the
file is then scraped with requests. Deliberately no NSE scraping library is used, since
those tend to make the code fragile.
Example URL:
If the current F&O series is Jun 2021 and the selected bhavcopy date (default is the
previous day) is 1st June 2021, then the bhavcopy URL looks like this (note that it is a zip file):
#'http://www1.nseindia.com/content/historical/DERIVATIVES/2021/JUN/fo01JUN2021bhav.csv.zip'
"""
if not market_is_off:
month_today = dt_1.strftime("%b")
m_m = month_today.upper()# NSE URL specific format
sday = dt_1.strftime('%d' + m_m + '%Y')
year = dt_1.strftime("%Y")
url = "http://www1.nseindia.com/content/historical/DERIVATIVES/" + year + "/" + m_m + "/fo" + sday + "bhav.csv.zip"
got_headers = {'Connection': 'keep-alive',
'Host': 'www1.nseindia.com',
'Origin': 'https://www1.nseindia.com',
'Referer': 'https://www1.nseindia.com/products/content/derivatives/equities/archieve_fo.htm',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'}
request = requests.get(url, headers=got_headers)
zipDocument = zipfile.ZipFile(io.BytesIO(request.content))
"""
To avoid clutter, create a folder named 'storage' (if it does not already exist) and store
bhavcopies there using a dynamic filename, e.g. fo<today_date>bhav.csv
"""
zipDocument.extractall(root + 'storage/')
ds = 'fo' + sday + 'bhav.csv'
qs = root + 'storage/' + ds
"read earlier unzipped csv"
df1 = pd.read_csv(qs, header='infer', skiprows=0)
"Bhavcopy contains all F&O scrips but we also need to create Nifty specific dataframe for index F&O"
df2 = df1[df1['SYMBOL'] == 'NIFTY']
"Keep rows for only current month F&O series"
df3 = df2[df2['EXPIRY_DT'] == expirydate]
df3 = df3.reset_index()
"""
We need the Nifty Futures last quote to get the ATM strike.
Also, for IVs we use Nifty F instead of spot as the underlying, keeping the interest
rate at 0 (the NIFTY futures premium takes care of the risk-free interest rate).
"""
nifty_price = df3.iloc[0]['CLOSE']
nifty_o = df3.iloc[0]['OPEN']
nifty_h = df3.iloc[0]['HIGH']
nifty_l = df3.iloc[0]['LOW']
nifty_v = df3.iloc[0]['CONTRACTS'] * 75
"""
Get At The Money strike (ATM) by simply using floor value of Nifty F last closing price
For ex. if last closing price for NF is 15608 then floor is 15600 which is our ATM
"""
df4 = df3.drop(axis=0, index=0) # dropped unwanted 1st row
def rounddn(x):
return int(math.floor(x / 100.0)) * 100
roundp = rounddn(nifty_price) # Lower round number for put
"""
Here major change from existing practice. As we know far OTM strikes on NSE are not that liquid like
the US so we have to adjust accordingly and remove noise. How?
Get 10 strikes near ATM in multiples of x100 (avoid 16050,17050) as they tend to be most
traded contracts.
"""
dfsmall1 = df4[df4.loc[:, 'STRIKE_PR'] <= roundp + 600]
dfsmall2 = dfsmall1[dfsmall1.loc[:, 'STRIKE_PR'] >= roundp - 500]
dfsmall_ce = dfsmall2[dfsmall2.loc[:, 'OPTION_TYP'] == 'CE']
dfsmall_pe = dfsmall2[dfsmall2.loc[:, 'OPTION_TYP'] == 'PE']
dfsmall_ce = dfsmall_ce.iloc[::2]
dfsmall_pe = dfsmall_pe.iloc[::2]
"Now since our own mini option chain is ready, let's calculate IVs using Mibian library"
dfsmall_ce = dfsmall_ce.reset_index()
dfsmall_pe = dfsmall_pe.reset_index()
"mibian.BS([Underlying Price, Strike Price, Interest Rate, Days To Expiration], Call/Put Price)"
days_to_exp = (exp_day_format - datenow).days
ceivlist = []
# Mibian BS function is tough to vectorize, so we have to use for loop here
for lence in range(len(dfsmall_ce)):
#lence=0
#mibian.BS([nifty_price , dfsmall_ce.iloc[lence]['STRIKE_PR'], 0, days_to_exp], 22).callPrice #get CE price
iv = mibian.BS([nifty_price, dfsmall_ce.iloc[lence]['STRIKE_PR'], 0, days_to_exp],
callPrice=dfsmall_ce.iloc[lence]['CLOSE']).impliedVolatility
ceivlist.append(iv)
dfsmall_ce['iv'] = ceivlist
peivlist = []
for lenpe in range(len(dfsmall_pe)):
#lenpe = 9
peiv = mibian.BS([nifty_price, dfsmall_pe.iloc[lenpe]['STRIKE_PR'], 0, days_to_exp],
putPrice=dfsmall_pe.iloc[lenpe]['CLOSE']).impliedVolatility
peivlist.append(peiv)
dfsmall_pe['iv'] = peivlist
#...........................................................................................................
"""
Calculate max pain
If you are not familiar with max pain, read this:
https://zerodha.com/varsity/chapter/max-pain-pcr-ratio/
"""
# 1st Put strikes
peloss_1st = []
for p in range(0,len(dfsmall_pe)):
#p = 2
# print(dfsmall_pe.iloc[p]['STRIKE_PR'])
peloss_i = dfsmall_pe.iloc[p]['OPEN_INT']*(100* p)
peloss_1st.append(peloss_i)
# Shortcut method to calculate the loss per strike.
# Idea: the OI of a single strike stays the same while the strike changes by a constant step, i.e. 100.
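# Worked explanation of the rescaling trick (added for clarity; values are illustrative):
# peloss_1st[k] = OI_k * 100 * k is the payoff of the puts at strike index k if expiry
# settles at the lowest strike (index 0). If expiry instead lands on strike index e, that
# payoff becomes OI_k * 100 * (k - e) = peloss_1st[k] * (k - e) / k, so each higher expiry
# level is obtained by dividing the index-0 payoffs by ratios like 2/1, 3/2, 4/3, ...
# (built from the denom_a / demon_b lists below) instead of recomputing every payoff from scratch.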
denom_a = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
demon_b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
loss_len = len(peloss_1st)
peloss_mainlist = []
pe_seriesloss_l = [sum(peloss_1st)]  # this is the main value we are looking for
for lo in range(0, loss_len-1):
# lo = 0
denom_main = [i / j for i, j in zip(denom_a[lo:], demon_b[:-(lo + 1)])]
peloss_list = [i / j for i, j in zip(peloss_1st[lo + 2:], denom_main)]
peloss_mainlist.append(peloss_list)
pe_seriesloss_l.append(sum(peloss_list))
# .............................................................................................................
# CE strikes for option pain
dfsmall_ce_inv = dfsmall_ce.copy().sort_values('STRIKE_PR', axis=0, ascending=False, inplace=False,
kind='quicksort', na_position='last')
celoss_1st = []
for c in range(0, len(dfsmall_ce)):
# i = 2
# print(dfsmall_pe.iloc[i]['STRIKE_PR'])
celoss_i = dfsmall_ce_inv.iloc[c]['OPEN_INT'] * (100 * c)
celoss_1st.append(celoss_i)
denom_ca = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
demon_cb = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
loss_clen = len(celoss_1st)
celoss_mainlist = []
ce_seriesloss_l = [sum(celoss_1st)]  # this is the main value we are looking for
for clo in range(0, loss_clen - 1):
# clo = 0
denom_cmain = [i / j for i, j in zip(denom_ca[clo:], demon_cb[:-(clo + 1)])]
celoss_list = [i / j for i, j in zip(celoss_1st[clo + 2:], denom_cmain)]
celoss_mainlist.append(celoss_list)
ce_seriesloss_l.append(sum(celoss_list))
ce_seriesloss_l.reverse() # reverse sort order
# calculate net loss for CE and PE
netloss_list = [i + j for i, j in zip(pe_seriesloss_l, ce_seriesloss_l)]
df_pain =pd.DataFrame({'Strikes':dfsmall_pe['STRIKE_PR'].to_list(),'Pain':netloss_list})
df_pain['Mp_strike'] = np.where(df_pain['Pain'] == df_pain['Pain'].min(),df_pain['Strikes'],0)
max_pain = df_pain['Mp_strike'].max()
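# max_pain is the strike with the smallest combined loss: np.where above zeroes out every
# other row, so taking the column max recovers that single strike.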
"#for plotting max pain along with IVs per strike"
df_pain['CE_IV']= dfsmall_ce['iv']
df_pain['PE_IV']= dfsmall_pe['iv']
#......................................................................................................
"Calculate pcr open interest & pcr weighted"
pcroi = round(dfsmall_pe['OPEN_INT'].sum() / dfsmall_ce['OPEN_INT'].sum(), 2)
pcrwt = round(dfsmall_pe['VAL_INLAKH'].sum() / dfsmall_ce['VAL_INLAKH'].sum(), 2)
"Calculate most active CE for a day (max change in OI) and highest CE OI for series"
rep = dfsmall_ce['CHG_IN_OI'].abs()
rep = rep.astype('float64')
valu = rep.idxmax()
activecall = dfsmall_ce.iloc[valu]['STRIKE_PR']
activecalloi = dfsmall_ce.iloc[valu]['OPEN_INT']
ceiv_mean = round(dfsmall_ce['iv'].mean(), 2)
"Calculate most active PE for a day (max change in OI) and highest PE OI for the series"
rep_pe = dfsmall_pe['CHG_IN_OI'].abs()
rep_pe = rep_pe.astype('float64')
valu_pe = rep_pe.idxmax()
activeput = dfsmall_pe.iloc[valu_pe]['STRIKE_PR']
activeputoi = dfsmall_pe.iloc[valu_pe]['OPEN_INT']
peiv_mean = round(dfsmall_pe['iv'].mean(), 2)
repmx = dfsmall_ce['OPEN_INT'].abs()
repmx = repmx.astype('float64')
valumx = repmx.idxmax()
activecallmx = dfsmall_ce.iloc[valumx]['STRIKE_PR']
activecalloimx = dfsmall_ce.iloc[valumx]['OPEN_INT']
rep_mxpe = dfsmall_pe['OPEN_INT'].abs()
rep_mxpe = rep_mxpe.astype('float64')
valu_mxpe = rep_mxpe.idxmax()
activeputmx = dfsmall_pe.iloc[valu_mxpe]['STRIKE_PR']
activeputoimx = dfsmall_pe.iloc[valu_mxpe]['OPEN_INT']
#..........................................................................................
"""
Now we compare today's values with historical values (historical data has been added).
From here on, run this file daily after 12.00 AM IST so that it downloads the previous
day's bhavcopy. Why the previous day instead of the current day? NSE sometimes uploads
the bhavcopy very late, in which case the script might download the same bhavcopy again.
If a download fails, adjust dayback accordingly, for instance dayback=-2 for the day
before yesterday.
You can also use that in a loop to download all historical data.
"""
coi = (root + 'CE.txt')
poi = (root + 'PE.txt')
pcrw = (root + 'pcrw.txt')
pcroi_file = (root + 'pcroi.txt')
peiv_file = (root + 'peiv.txt')
ceiv_file = (root + 'ceiv.txt')
activece_file = (root + 'activece_strike.txt')
activepe_file = (root + 'activepe_strike.txt')
activeceoi_file = (root + 'activece_oi.txt')
activepeoi_file = (root + 'activepe_oi.txt')
nffile = root + 'Nifty4options.txt'
"# chg pcroi"
dfpcroi = pd.read_csv(pcroi_file)
pcroi_prev = dfpcroi.iloc[-1][3]
pcroi_chg = round(float(pcroi) - float(pcroi_prev), 2)
"# chg pcr wt"
t2 = pd.read_csv(pcrw, sep=",")
pcr1 = t2.iloc[-1][3]
pcr_wt_ch = pcrwt - pcr1
# chg ivs for pe and ce
dfpeiv = pd.read_csv(peiv_file, header=None)
peiv_prev = dfpeiv.iloc[-1][3]
peiv_chg = round(float(peiv_mean) - float(peiv_prev), 2)
dfceiv = pd.read_csv(ceiv_file,header=None)
ceiv_prev = dfceiv.iloc[-1][3]
ceiv_chg = round(float(ceiv_mean) - float(ceiv_prev), 2)
"#iv rank calculations"
cutoff = 75 # max historical period
dfceiv = dfceiv.tail(cutoff)
dfpeiv = dfpeiv.tail(cutoff)
#todo: append current values so rank wont be negative below
dtx = dt_1.strftime("%Y%m%d")
dfpeiv_current = pd.DataFrame(['PEIV', dtx, peiv_mean, peiv_mean, peiv_mean, peiv_mean]).transpose()
dfceiv_current = pd.DataFrame(['CEIV', dtx, ceiv_mean, ceiv_mean, ceiv_mean, ceiv_mean]).transpose()
dfpeiv_all = pd.concat([dfpeiv,dfpeiv_current],axis=0)
dfceiv_all = pd.concat([dfceiv, dfceiv_current], axis=0)
dfpeiv_all = dfpeiv_all.reset_index()
dfceiv_all = dfceiv_all.reset_index()
dfpeiv_all[4] = pd.to_numeric(dfpeiv_all[4])
peiv_rank = 100*((peiv_mean - dfpeiv_all[4].min())/(dfpeiv_all[4].max() - dfpeiv_all[4].min()))
dfceiv_all = dfceiv_all.dropna()
dfceiv_all[4] = pd.to_numeric(dfceiv_all[4])
ceiv_rank = 100*((ceiv_mean - dfceiv_all[4].min())/(dfceiv_all[4].max() - dfceiv_all[4].min()))
"# iv percentile calculations"
dfceiv_all['low_close'] = np.where(dfceiv_all[4]<ceiv_mean,1,0)
ceiv_percentile = 100*(dfceiv_all['low_close'].sum()/cutoff)
dfpeiv_all['low_close'] = np.where(dfpeiv_all[4]<peiv_mean,1,0)
peiv_percentile = 100*(dfpeiv_all['low_close'].sum()/cutoff)
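# Worked example (illustrative numbers): if the trailing CEIV closes range from 12 to 32
# and today's mean CEIV is 22, the IV rank is 100 * (22 - 12) / (32 - 12) = 50.
# If 45 of the last 75 closes are below today's mean, the IV percentile is 100 * 45 / 75 = 60.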
"# ce unwinding"
list_ce_unwind = (dfsmall_ce[dfsmall_ce['CHG_IN_OI'] < 0]).STRIKE_PR.to_list()
list_pe_unwind = (dfsmall_pe[dfsmall_pe['CHG_IN_OI'] < 0]).STRIKE_PR.to_list()
"# list of top 20 futures chnage in OI with scenarios"
df_no_nf = df1[df1['OPTION_TYP'] == 'XX']
# df_no_bnf = df_no_nf[df_no_nf['SYMBOL'] != 'BANKNIFTY']
df_no_nf_current = df_no_nf.copy()[df_no_nf['EXPIRY_DT'] == expirydate]
df_no_bnf_current = df_no_nf_current.sort_values('VAL_INLAKH', ascending=False)
df_active10 = df_no_bnf_current.head(20)
long_buildup_toplist = df_active10[(df_active10['CHG_IN_OI'] >= 0) &
(df_active10['CLOSE'] >= df_active10['OPEN'])].SYMBOL.to_list()
short_buildup_toplist = df_active10[(df_active10['CHG_IN_OI'] >= 0) &
(df_active10['CLOSE'] < df_active10['OPEN'])].SYMBOL.to_list()
short_cover_toplist = df_active10[(df_active10['CHG_IN_OI'] < 0) &
(df_active10['CLOSE'] >= df_active10['OPEN'])].SYMBOL.to_list()
long_liquid_toplist = df_active10[(df_active10['CHG_IN_OI'] < 0) &
(df_active10['CLOSE'] < df_active10['OPEN'])].SYMBOL.to_list()
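# Scenario matrix used above (standard change-in-OI interpretation):
#   OI up,   close >= open  -> long buildup
#   OI up,   close <  open  -> short buildup
#   OI down, close >= open  -> short covering
#   OI down, close <  open  -> long liquidation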
long_sum =len(long_buildup_toplist)
short_sum = len(short_buildup_toplist)
longliq_sum = len(long_liquid_toplist)
shortcover_sum = len(short_cover_toplist)
ceunwind_sum = len(list_ce_unwind)
peunwind_sum = len(list_pe_unwind)
"# Now update current calculations to historical file"
timeindex = dt_1.strftime("%H:%M:%S")
#dateindex =dt_1.strftime("%Y%m%d,%H:%M:%S") # for Niftyf backward compatibility date index is date,time
# df_nf4o: dataframe (df) of NiftyF (nf) built for (4) the options (o) history file
df_nf4o = pd.DataFrame(['NiftyF', dtx, timeindex, nifty_o, nifty_h, nifty_l, nifty_price, nifty_v]).transpose()
df_nf4o.to_csv(nffile, mode='a', index=False, header=False)
df_pcroi = pd.DataFrame(['PCROI', dtx, pcroi, pcroi, pcroi, pcroi]).transpose()
df_pcroi.to_csv(root + 'pcroi.txt', mode='a', index=False, header=False)
df_peiv = pd.DataFrame(['PEIV', dtx, peiv_mean, peiv_mean, peiv_mean, peiv_mean]).transpose()
df_peiv.to_csv(root + 'peiv.txt', mode='a', index=False, header=False)
df_ceiv = pd.DataFrame(['CEIV', dtx, ceiv_mean, ceiv_mean, ceiv_mean, ceiv_mean]).transpose()
df_ceiv.to_csv(root + 'ceiv.txt', mode='a', index=False, header=False)
df_activece_f = pd.DataFrame(
['Active_CEstrike', dtx, activecallmx, activecallmx, activecallmx, activecallmx]).transpose()
df_activece_f.to_csv(activece_file, mode='a', index=False, header=False)
df_activepe_f = pd.DataFrame(
['Active_PEstrike', dtx, activeputmx, activeputmx, activeputmx, activeputmx]).transpose()
df_activepe_f.to_csv(activepe_file, mode='a', index=False, header=False)
df_activeceoi_f = pd.DataFrame(
['Active_CEOI', dtx, activecalloimx, activecalloimx, activecalloimx, activecalloimx]).transpose()
df_activeceoi_f.to_csv(activeceoi_file, mode='a', index=False, header=False)
df_activepeoi_f = pd.DataFrame(
['Active_PEOI', dtx, activeputoimx, activeputoimx, activeputoimx, activeputoimx]).transpose()
df_activepeoi_f.to_csv(activepeoi_file, mode='a', index=False, header=False)
df_CE = pd.DataFrame(['CE', dtx, activecalloi, activecalloi, activecalloi, activecalloi]).transpose()
df_CE.to_csv(coi, mode='a', index=False, header=False)
df_PE = pd.DataFrame(['PE', dtx, activeputoi, activeputoi, activeputoi, activeputoi]).transpose()
df_PE.to_csv(poi, mode='a', index=False, header=False)
df_pcrwr = pd.DataFrame(['pcrw', dtx, pcrwt, pcrwt, pcrwt, pcrwt]).transpose()
df_pcrwr.to_csv(pcrw, mode='a', index=False, header=False)
"#NFDaily, pcr oi, most active CE OI chg, most active CE OI chg daily history, index to datetime and make dfall"
dfnf = pd.read_csv(root + 'Nifty4options.txt', header=None)
dfnf.columns = ['symbol', 'date', 'time', 'o', 'h', 'l', 'c', 'v']
dfnf['date'] = pd.to_datetime(dfnf['date'], format='%Y%m%d')
dfnf = dfnf.set_index('date', drop=False, inplace=False)
dfpcr = pd.read_csv(root + 'pcroi.txt', header=None)
my_columns = ['symbol', 'date', 'o', 'h', 'l', 'c']
dfpcr.columns = my_columns # ['symbol', 'date', 'o', 'h', 'l', 'c']
dfpcr['date'] = pd.to_datetime(dfpcr['date'], format='%Y%m%d')
dfpcr = dfpcr.set_index('date', drop=False, inplace=False)
dfhybrid = dfnf.merge(dfpcr['c'], left_index=True, right_index=True, how='inner')
dfhybrid = dfhybrid[['c_x', 'v', 'c_y']]
dfhybrid.columns = ['NF_Close', 'Volume', 'PCR_OI']
dfce = pd.read_csv(root + 'ceiv.txt', header=None)
dfce.columns = my_columns # ['symbol', 'date', 'o', 'h', 'l', 'c']
dfce['date'] = pd.to_datetime(dfce['date'], format='%Y%m%d')
import numpy as np
import pandas as pd
from numba import jit
ngjit = jit(nopython=True, nogil=True)
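# ngjit can now be used as a decorator (e.g. @ngjit) on purely numeric helpers,
# compiling them with numba in nopython/nogil mode.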
# Based on: https://github.com/holoviz/spatialpandas/blob/
# 9252a7aba5f8bc7a435fffa2c31018af8d92942c/spatialpandas/dask.py
def _hilbert_distance(gdf, total_bounds, p):
"""
Calculate the Hilbert distance for a GeoDataFrame using int coordinates
Parameters
----------
gdf : GeoDataFrame
total_bounds : Total bounds of geometries - array
p : The number of iterations used in constructing the Hilbert curve
Returns
---------
Pandas Series containing hilbert distances
"""
# Compute bounds as array
bounds = gdf.bounds.to_numpy()
# Compute hilbert distances
coords = _continuous_to_discrete_coords(total_bounds, bounds, p)
distances = _distances_from_coordinates(p, coords)
    return pd.Series(distances, index=gdf.index, name="hilbert_distance")
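# Minimal usage sketch (assumes geopandas is available and that the helper functions
# _continuous_to_discrete_coords / _distances_from_coordinates are defined elsewhere in
# this module, as in the spatialpandas source this is adapted from):
# import geopandas
# gdf = geopandas.read_file("polygons.shp")          # any GeoDataFrame with geometry
# distances = _hilbert_distance(gdf, gdf.total_bounds, p=15)
# gdf_sorted = gdf.iloc[distances.argsort()]         # rows reordered along the Hilbert curve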
#%%
import numpy as np
import pandas as pd
import altair as alt
import anthro.io
# Generate a plot for EEA non-indigenous marine species
data = pd.read_csv('../processed/non_ind_marine_species_europe.csv')
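# A minimal Altair sketch of what the plot could look like (the column names below are
# assumptions for illustration, not taken from the processed CSV):
# chart = alt.Chart(data).mark_line(point=True).encode(
#     x=alt.X('year:O', title='Year'),
#     y=alt.Y('species_count:Q', title='Non-indigenous marine species'),
# ).properties(width=500, height=300)
# chart.save('non_ind_marine_species_europe.html')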
#!/usr/bin/env python
# coding: utf-8
# # Environment Setup
# ## Once
# In[1]:
from PIL import Image
import pytesseract
# Change output format
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
#InteractiveShell.ast_node_interactivity = "last_expr"
# Import packages
import warnings
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
from sklearn.cluster import KMeans
import json
import requests
#!pip install folium
#import folium
import random
import imageio
import matplotlib.patches as mpatches
from sklearn.linear_model import LinearRegression
import matplotlib.patches as patches
import matplotlib.colors as colors
import math
from io import BytesIO
#from tabulate import tabulate
import time
#from chinese_calendar import is_workday, is_holiday
#from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV,RandomizedSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
#from xgboost import plot_importance
from PIL import Image, ImageEnhance
import cv2
import re
# Other settings
warnings.filterwarnings('ignore')
get_ipython().run_line_magic('matplotlib', 'inline')
pd.set_option('display.max_columns',None)
#pd.set_option('display.max_rows', None)
#sns.set(font='SimHei')
# -*- coding: utf-8 -*-
#import cx_Oracle
import os
#os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
#from collections import Counter
# df.to_pickle('./Cache/cache_df_nd_2019.pkl')
# pd.read_pickle('samples')
def see(df):
display(df.head(2))
print(df.shape)
def see_null(df):
col_list = df.columns
print('Null value counts:')
for i in col_list:
null = df[i].isnull().sum()
print(i+': '+str(null))
# ## Control
# In[2]:
#InteractiveShell.ast_node_interactivity = "last_expr"
InteractiveShell.ast_node_interactivity = "all"
pd.set_option('display.max_columns', None)
# Copyright 2019 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Adapted by <NAME> in November,2019 from this Colab notebook:
#https://colab.research.google.com/github/google-research/bert/blob/master/predicting_movie_reviews_with_bert_on_tf_hub.ipynb.
# Changes include:
# - Reading our stressor data and parsing it properly
# - reconfiguring the last layer to include N neurons corresponding to N categories
# - correcting the probability output so that it follows [0,1] proper pattern
# - better analysis with confusion matrix
# - exporting to pb format for tensorflow serving api
import os
os.environ['LD_LIBRARY_PATH'] = '/usr/local/cuda-10.0/lib64'
os.environ['CUDA_VISIBLE_DEVICES']='0'
import sys
print(sys.executable)
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from datetime import datetime
import matplotlib.pyplot as plt
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import f1_score,confusion_matrix,classification_report,accuracy_score
import logging
logging.basicConfig(stream=sys.stdout, level=logging.ERROR) #INFO)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_colwidth', 1000)
config = tf.ConfigProto()
#config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
#config.gpu_options.visible_device_list="0"
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
import bert
from bert import run_classifier_with_tfhub
from bert import optimization
from bert import tokenization
from bert import modeling
import numpy as np
############ Utils functions ##################
def create_examples_prediction(df):
"""Creates examples for the training and dev sets."""
examples = []
for index, row in df.iterrows():
#labels = row[LABEL_HOT_VECTOR].strip('][').split(', ')
#labels = [float(x) for x in labels]
labels = list(row[label_list_text])
examples.append(labels)
return pd.DataFrame(examples)
def f(x):
n = 2  # index of the second-highest probability to get labeled
index = np.argsort(x.values.flatten().tolist())[-n:][0]
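# e.g. for probabilities [0.1, 0.6, 0.3], np.argsort gives [0, 2, 1]; the last n=2 entries
# are [2, 1], and [0] picks index 2, i.e. the class with the second-highest probability.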
print(f"index is {index}")
label = label_list_text[index]
print(f"label is {label}")
return label
final_columns = ["sOrder","Input.text","is_stressor","is_stressor_conf","top_label","second_label","Branch", "Above SD-THRESHOLD","SD-THRESHOLD","SD","Other","Everyday Decision Making","Work","Social Relationships","Financial Problem","Health, Fatigue, or Physical Pain","Emotional Turmoil","Family Issues","School","avg_severity","median_severity","SD_severity","Votes","Source"]
def get_test_experiment_df(test):
test_predictions = [x[0]['probabilities'] for x in zip(getListPrediction(in_sentences=list(test[DATA_COLUMN])))]
test_live_labels = np.array(test_predictions).argmax(axis=1)
test[LABEL_COLUMN_RAW] = [label_list_text[x] for x in test_live_labels] # appending the labels to the dataframe
probabilities_df_live = pd.DataFrame(test_predictions) # creating a proabilities dataset
probabilities_df_live.columns = [x for x in label_list_text] # naming the columns
probabilities_df_live['second_label'] = probabilities_df_live.apply(lambda x:f(x),axis=1)
#print(test)
#label_df = create_examples_prediction(test)
#label_df.columns = label_list_text
#label_df['label 2'] = label_df.apply(lambda x:f(x),axis=1)
test.reset_index(inplace=True,drop=True) # resetting index
test_removed_columns = list(set(test.columns)-set(probabilities_df_live.columns))
test_temp = test[test_removed_columns]
experiment_df = pd.concat([test_temp,probabilities_df_live],axis=1, ignore_index=False)
missing_cols = list(set(experiment_df.columns)-set(final_columns))
experiment_df[missing_cols] = np.nan#.loc[:, missing_cols] = np.nan
experiment_df = experiment_df.reindex(columns = final_columns)
#experiment_df = experiment_df.reindex(sorted(experiment_df.columns), axis=1)
return test,experiment_df
def getListPrediction(in_sentences):
# 1. wrap each raw sentence in an InputExample object
input_examples = [bert.run_classifier.InputExample(guid="", text_a = x, text_b = None, label = 0) for x in in_sentences] # here, "" is just a dummy label
# 2. convert the examples to BERT input features (token ids, input masks, segment ids)
input_features = bert.run_classifier.convert_examples_to_features(input_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
# 3. build the prediction input_fn from the features
predict_input_fn = bert.run_classifier.input_fn_builder(features=input_features, seq_length=MAX_SEQ_LENGTH, is_training=False, drop_remainder=False)
print(input_features[0].input_ids)
# 4. run the estimator to yield per-example prediction dicts (including 'probabilities')
predictions = estimator.predict(input_fn=predict_input_fn,yield_single_examples=True)
return predictions
is_normalize_active=False
def get_confusion_matrix(y_test,predicted,labels):
class_names=labels
# plotting confusion matrix
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plot_confusion_matrix(y_test, predicted, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plot_confusion_matrix(y_test, predicted, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
classes =classes
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
#print("Normalized confusion matrix")
else:
test =1
#print('Confusion matrix, without normalization')
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
#ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
#fig.tight_layout()
return ax
####### Loading the data #####
def data_prep_bert(df,test_size,trail_nb):
#print("Filling missing values")
#df[DATA_COLUMN] = df[DATA_COLUMN].fillna('_NA_')
print("Splitting dataframe with shape {} into training and test datasets".format(df.shape))
X_train, X_test = train_test_split(df,random_state=trail_nb, test_size=test_size,stratify = df[LABEL_COLUMN_RAW])
#print(X_test.head(10))
return X_train, X_test
def open_dataset(NAME,mapping_index,excluded_categories):
df = pd.read_csv(PATH + NAME + '.csv', sep=',')
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from math_helpers.constants import *
from traj import lambert
from traj.meeus_alg import meeus
from traj.conics import get_rv_frm_elements
from traj.bplane import bplane_vinf
import pandas as pd
from math_helpers.time_systems import get_JD, cal_from_jd
from math_helpers import matrices as mat
from math_helpers.vectors import vcrossv
def launchwindows(departure_planet, departure_date, arrival_planet,
arrival_window, dm=None, center='sun', run_test=False):
"""return plots of c3 and vinf values for a 0 rev lambert transfer
between a departure and arrival planet within a given arrival window.
:param departure_planet: name of departure planet (str)
:param departure_date: date of departure ('yyyy-mm-dd')
:param arrival_planet: name of arrival planet (str)
:param arrival_window: list with begin and end arrival date window
['yyyy-mm-dd', 'yyyy-mm-dd']
:param dm: direction of motion (optional); if None, then the script
will auto-compute direction based on the change in true
anomaly
:param center: point where both planets are orbiting about;
default = 'sun'
:param run_test: run unit tests with lower days and bypass plots
"""
# reinitializing
dep_date = departure_date
dp = departure_planet
ap = arrival_planet
# get departure julian dates
dep_date = pd.to_datetime(dep_date)
dep_JD = get_JD(dep_date.year, dep_date.month, dep_date.day, \
dep_date.hour, dep_date.minute, dep_date.second)
days = 1000
if run_test:
days = 300
# get arrival windows
arrival_window = pd.to_datetime(arrival_window)
arrival_window = np.linspace(arrival_window[0].value, arrival_window[1].value, days)
arrival_window = pd.to_datetime(arrival_window)
# get state of departure planet
dep_elements = meeus(dep_JD, planet=dp)
s_dep_planet = get_rv_frm_elements(dep_elements, center=center, method='sma')
r_dep_planet = s_dep_planet[:3]
v_dep_planet = s_dep_planet[3:6]
# initializing arrival dataframe
columns = ['tof_d', 'TOF', 'v_inf_dep', 'v_inf_arr', 'c3', 'vinf']
transfer = pd.DataFrame(index=arrival_window, columns=columns)
for date in arrival_window:
transfer['tof_d'][date] = date.date()
# get state of arrival planet at the current arrival date
arrival_jdate = get_JD(date.year, date.month, date.day, \
date.hour, date.minute, date.second)
arr_elements = meeus(arrival_jdate, planet=ap)
s_arrival = get_rv_frm_elements(arr_elements, center=center, method='sma')
r_arr_planet = s_arrival[:3]
v_arr_planet = s_arrival[3:6]
# convert date since departure to seconds
transfer['TOF'][date] = (date - dep_date).total_seconds()
# compute lambert solution at current arrival date
vi, vf = lambert.lambert_univ(r_dep_planet, r_arr_planet, \
transfer['TOF'][date], dm=dm,
center=center,
dep_planet=dp, arr_planet=ap)
# compute hyperbolic departure/arrival velocities
transfer['v_inf_dep'][date] = vi - v_dep_planet
transfer['v_inf_arr'][date] = vf - v_arr_planet
# compute c3 values at departure and v_inf values at arrival
transfer['c3'][date] = norm(transfer['v_inf_dep'][date])**2
transfer['vinf'][date] = norm(transfer['v_inf_arr'][date])
# get values and dates of min c3/v_inf
minc3 = transfer['c3'].min()
minc3_date = transfer['TOF'][transfer['c3'] == minc3]
minvinf = transfer['vinf'].min()
minvinf_date = transfer['TOF'][transfer['vinf'] == minvinf]
print(f'(a) min c3 = {minc3} km2/s2 on {minc3_date.index[0]}'
f' // {transfer.loc[transfer["c3"] == minc3, "tof_d"][0]}')
print(f'(b) min v_inf = {minvinf} km/s on {minvinf_date.index[0]}'
f' // {transfer.loc[transfer["vinf"] == minvinf, "tof_d"][0]}')
if run_test:
return minc3, minvinf, \
str(minc3_date.index[0])[:10], str(minvinf_date.index[0])[:10]
# # assuming positions of planets are in the ecliptic,
# # determine Type 1 or 2 transfer
tanom1 = np.arctan2(r_dep_planet[1], r_dep_planet[0])
tanom2 = np.arctan2(r_arr_planet[1], r_arr_planet[0])
dtanom = tanom2 - tanom1
if dtanom > np.pi:
ttype = '2'
elif dtanom < np.pi:
ttype = '1'
# plots
fig=plt.figure(figsize=(12,6))
plt.style.use('seaborn')
# c3 vs tof
ax=fig.add_subplot(121)
ax.set_xlabel(f"days past departure ({departure_date})")
ax.set_ylabel("c3, km2/s2")
ax.set_title(f"c3 versus time of flight, Type {ttype}")
ax.plot(transfer['TOF']/3600/24, transfer['c3'], label='departure_c3')
ax.plot(minc3_date.values/3600/24, minc3, 'bo', markersize=12, label='min_c3')
# v_inf vs tof
ax2=fig.add_subplot(122)
ax2.set_xlabel(f"days past departure ({departure_date})")
ax2.set_ylabel(f"v_inf at {ap}, km/s")
ax2.set_title(f"v_inf at {ap} versus time of flight, Type {ttype}")
ax2.plot(transfer['TOF']/3600/24, transfer['vinf'], label='arrival_vinf')
ax2.plot(minvinf_date.values/3600/24, minvinf, 'ro', markersize=12, label='min_vinf')
ax.legend()
ax2.legend()
fig.tight_layout(pad=4.0)
plt.show()
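# Minimal usage sketch (hypothetical dates and planets, not taken from the original repo):
# launchwindows('earth', '2018-05-01', 'mars', ['2018-09-01', '2019-09-01'], center='sun')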
def get_porkchops(dep_jd_init, dep_jd_fin, arr_jd_init, arr_jd_fin,
dp='earth', ap='jupiter', center='sun',
contour_tof=None, contour_c3=None,
contour_vinf=None, contour_vinf_out=None,
plot_tar=False, tar_dep=None, tar_arr=None,
shade_c3=False, shade_tof=False, shade_vinf_arr=False,
shade_vinf_range=None, shade_tof_range=None, fine_search=True):
"""generates a porkchop plot for a given launch and arrival window.
:param dep_jd_init: initial departure date (JD)
:param dep_jd_fin: final departure date (JD)
:param arr_jd_init: initial arrival date (JD)
:param arr_jd_fin: final arrival date (JD)
:param dp: departure planet
:param ap: arrival planet
:param center: center body of orbit; default='sun'
:param contour_tof: array of tof contours to plot
:param contour_c3: array of launch c3 contours to plot (optional)
:param contour_vinf: array of vinf inbound contours to plot
:param contour_vinf_out: array of vinf outbound contours to plot (optional)
:param plot_tar: plot target point (True); default=False
:param tar_dep: target departure date (JD)
:param tar_arr: target arrival date (JD)
:param shade_c3: option to shade certain c3 contours (True)
:param shade_tof: option to shade certain tof contours (True)
:param shade_vinf_arr: option to shade certain arrival vinf contours (True)
:param shade_vinf_range: array of arrival vinf range to shade
:param shade_tof_range: array of time of flight range to shade
:return df: if contour_c3 is present, [df_tof, df_c3, df_vinf_arr];
if contour_vinf_out is present,
[df_tof, df_vinf_dep, df_vinf_arr]
"""
plot_c3 = True
plot_vinf_out = True
if contour_c3 is None:
plot_c3 = False
if contour_vinf_out is None:
plot_vinf_out = False
if tar_dep is None:
pass
else:
print('segment tof (days):',tar_arr-tar_dep)
print('target departure (cal):', cal_from_jd(tar_dep, rtn='string'), '(jd)', tar_dep)
print('target arrival (cal):', cal_from_jd(tar_arr, rtn='string'), '(jd)', tar_arr)
# departure and arrival dates
dep_date_initial_cal = cal_from_jd(dep_jd_init, rtn='string')
arr_date_initial_cal = cal_from_jd(arr_jd_init, rtn='string')
dep_date_initial_cal = pd.to_datetime(dep_date_initial_cal)
arr_date_initial_cal = pd.to_datetime(arr_date_initial_cal)
# time windows
delta_dep = dep_jd_fin - dep_jd_init
delta_arr = arr_jd_fin - arr_jd_init
if fine_search:
delta = 1
else:
delta = 5
departure_window = np.linspace(dep_jd_init, dep_jd_fin, int(delta_dep/delta))
arrival_window = np.linspace(arr_jd_init, arr_jd_fin, int(delta_arr/delta))
# generate dataframes for c3, time of flight, and dep/arrival v_inf
df_c3 = pd.DataFrame(index=arrival_window, columns=departure_window)
df_tof = pd.DataFrame(index=arrival_window, columns=departure_window)
df_vinf_arr = pd.DataFrame(index=arrival_window, columns=departure_window)
df_vinf_dep = pd.DataFrame(index=arrival_window, columns=departure_window)
# loop through launch dates
for dep_JD in departure_window:
for arr_JD in arrival_window:
tof_s = (arr_JD-dep_JD)*3600*24
s_planet1 = meeus(dep_JD, planet=dp, rtn='states', ref_rtn=center)
s_planet2 = meeus(arr_JD, planet=ap, rtn='states', ref_rtn=center)
vi, vf = lambert.lambert_univ(s_planet1[:3], s_planet2[:3], tof_s,
center=center, dep_planet=dp, arr_planet=ap)
c3 = norm(vi-s_planet1[3:6])**2
vinf_arr = norm(vf - s_planet2[3:6])
vinf_dep = norm(vi - s_planet1[3:6])
df_c3[dep_JD][arr_JD] = c3
df_tof[dep_JD][arr_JD] = arr_JD-dep_JD
df_vinf_arr[dep_JD][arr_JD] = vinf_arr
df_vinf_dep[dep_JD][arr_JD] = vinf_dep
# generate contour plots
fig, ax = plt.subplots(figsize=(10,8))
plt.style.use('default')
CS_tof = ax.contour(departure_window-departure_window[0], arrival_window-arrival_window[0],
df_tof, linewidths=0.5, colors=('gray'), levels=contour_tof)
CS_vinf_arr = ax.contour(departure_window-departure_window[0], arrival_window-arrival_window[0],
df_vinf_arr, linewidths=0.5, colors=('g'), levels=contour_vinf)
if plot_vinf_out:
CS_vinf_dep = ax.contour(departure_window-departure_window[0], arrival_window-arrival_window[0],
df_vinf_dep, linewidths=0.5, colors=('b'), levels=contour_vinf_out)
if plot_c3:
CS_c3 = ax.contour(departure_window-departure_window[0], arrival_window-arrival_window[0],
df_c3, linewidths=0.5, colors=('b'), levels=contour_c3)
ax.set_title(f'pork chop plot from {dp} to {ap}')
ax.set_xlabel(f'{dp} departure dates - days since {dep_date_initial_cal}')
ax.set_ylabel(f'{ap} arrival dates - days since {arr_date_initial_cal}')
ax.clabel(CS_tof, inline=0.2, fmt="%.0f", fontsize=10)
ax.clabel(CS_vinf_arr, inline=0.2, fmt="%.1f", fontsize=10)
h1,_ = CS_tof.legend_elements()
h3,_ = CS_vinf_arr.legend_elements()
if plot_c3:
ax.clabel(CS_c3, inline=0.2, fmt="%.1f", fontsize=10)
h2,_ = CS_c3.legend_elements()
ax.legend([h1[0], h2[0], h3[0]], ['TOF, days', 'c3, km2/s2', 'v_inf_arrival, km/s'],
loc=2, facecolor='white', framealpha=1)
elif plot_vinf_out:
ax.clabel(CS_vinf_dep, inline=0.2, fmt="%.1f", fontsize=10)
h2,_ = CS_vinf_dep.legend_elements()
ax.legend([h1[0], h2[0], h3[0]], ['TOF, days', 'vinf_departure, km/s', 'v_inf_arrival, km/s'],
loc=2, facecolor='white', framealpha=1)
if plot_tar:
plt.scatter(tar_dep-dep_jd_init, tar_arr-arr_jd_init, linewidths=18, color='orange')
# shade region within these bounds
if shade_vinf_arr:
CS_vinf_arr = ax.contourf(departure_window-departure_window[0],
arrival_window-arrival_window[0], df_vinf_arr,
colors=('g'), levels=shade_vinf_range, alpha=0.3)
if shade_tof:
CS_tof = ax.contourf(departure_window-departure_window[0],
arrival_window-arrival_window[0], df_tof,
colors=('black'), levels=shade_tof_range, alpha=0.3)
    plt.savefig(f'porkchops_{dp}_{ap}.png')
plt.show()
if plot_c3:
return [df_tof, df_c3, df_vinf_arr]
elif plot_vinf_out:
return [df_tof, df_vinf_dep, df_vinf_arr]
else:
return [df_tof, df_vinf_arr]
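# Usage sketch (hypothetical Julian dates and contour levels, for illustration only):
# dfs = get_porkchops(2458200.5, 2458260.5, 2458800.5, 2459200.5,
#                     dp='earth', ap='jupiter',
#                     contour_tof=[400, 600, 800, 1000],
#                     contour_c3=[80, 90, 100, 110],
#                     contour_vinf=[5, 6, 7, 8])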
def run_pcp_search(dep_jd_init, dep_jd_fin, pl2_jd_init, pl2_jd_fin, pl3_jd_init, pl3_jd_fin,
dpl='earth', pl2='jupiter', pl3='pluto', center='sun',
c3_max=None, vinf_max=None, vinf_tol=None, rp_min=None, fine_search=False):
"""generates a porkchop plot for a given launch and arrival window.
:param dep_jd_init: initial departure date of launch planet (planet 1) (JD)
:param dep_jd_fin: final departure date of launch planet (planet 1) (JD)
:param pl2_jd_init: initial arrival date of flyby planet (planet 2) (JD)
:param pl2_jd_fin: final arrival date of flyby planet (planet 2) (JD)
:param pl3_jd_init: initial arrival date of arrival planet (planet 3) (JD)
:param pl3_jd_fin: final arrival date of arrival planet (planet 3) (JD)
:param dpl: name of departure planet (planet 1)
:param pl2: name of flyby planet (planet 2)
:param pl3: name of arrival planet (planet 3)
:param center: center body of orbit; default='sun'
:param c3_max: maximum launch c3 constraint (km2/s2)
:param vinf_max: maximum final arrival vinf at planet 3 (km/s)
:param vinf_tol: maximum allowable delta-vinf inbound/outbound of flyby (km/s)
:param rp_min: minimum radius of flyby (km)
:param fine_search: option between coarse search of 3 days interval (False);
or fine search of 0.8 days interval (True)
:return df: [dfpl1_c3, dfpl2_tof, dfpl2_vinf_in, dfpl2_vinf_out, ...
dfpl3_tof, dfpl3_vinf_in, dfpl3_rp]
    Work in progress: the constraint options still need more robust handling.
"""
# departure and arrival dates
dep_date_init_cal = pd.to_datetime(cal_from_jd(dep_jd_init, rtn='string'))
pl2_jd_init_cal = pd.to_datetime(cal_from_jd(pl2_jd_init, rtn='string'))
pl3_jd_init_cal = pd.to_datetime(cal_from_jd(pl3_jd_init, rtn='string'))
# time windows
delta_dep = dep_jd_fin - dep_jd_init
delta_pl2 = pl2_jd_fin - pl2_jd_init
delta_pl3 = pl3_jd_fin - pl3_jd_init
searchint = 3
if fine_search:
searchint = 0.8
dep_window = np.linspace(dep_jd_init, dep_jd_fin, int(delta_dep/searchint))
# print(dep_window)
pl2_window = np.linspace(pl2_jd_init, pl2_jd_fin, int(delta_pl2/searchint))
pl3_window = np.linspace(pl3_jd_init, pl3_jd_fin, int(delta_pl3/searchint))
# generate dataframes for c3, time of flight, and dep/arrival v_inf
dfpl1_c3 = pd.DataFrame(index=pl2_window, columns=dep_window)
dfpl2_tof = pd.DataFrame(index=pl2_window, columns=dep_window)
dfpl2_vinf_in = pd.DataFrame(index=pl2_window, columns=dep_window)
dfpl2_vinf_out = pd.DataFrame(index=pl2_window, columns=dep_window)
dfpl3_tof = pd.DataFrame(index=pl3_window, columns=pl2_window)
dfpl3_vinf_in = pd.DataFrame(index=pl3_window, columns=pl2_window)
dfpl3_rp = pd.DataFrame(index=pl3_window, columns=pl2_window)
# loop through launch dates
for dep_JD in dep_window:
for arr_JD in pl2_window:
tof12_s = (arr_JD-dep_JD)*3600*24
s_planet1 = meeus(dep_JD, planet=dpl, rtn='states', ref_rtn=center)
s_planet2 = meeus(arr_JD, planet=pl2, rtn='states', ref_rtn=center)
vi_seg1, vf_seg1 = lambert.lambert_univ(s_planet1[:3], s_planet2[:3], tof12_s,
center=center, dep_planet=dpl, arr_planet=pl2)
c3 = norm(vi_seg1-s_planet1[3:6])**2
if c3 < c3_max:
# print('c3', c3)
for arr2_JD in pl3_window:
tof23_s = (arr2_JD-arr_JD)*3600*24
s_planet3 = meeus(arr2_JD, planet=pl3, rtn='states', ref_rtn=center)
vi_seg2, vf_seg2 = lambert.lambert_univ(s_planet2[:3], s_planet3[:3], tof23_s,
center=center, dep_planet=pl2, arr_planet=pl3)
vinf_pl2_in = norm(vf_seg1 - s_planet2[3:6])
vinf_pl2_out = norm(vi_seg2 - s_planet2[3:6])
if abs(vinf_pl2_in-vinf_pl2_out) < vinf_tol:
# print(abs(vinf_pl2_in-vinf_pl2_out))
rp = bplane_vinf(vf_seg1, vi_seg2, center=pl2, rtn_rp=True)
if rp > rp_min:
# print('rp', rp)
vinf_pl3_in = norm(vf_seg2 - s_planet3[3:6])
if vinf_pl3_in < vinf_max:
# print('vinf_pl2_out', vinf_pl2_out)
dfpl1_c3[dep_JD][arr_JD] = c3
dfpl2_tof[dep_JD][arr_JD] = arr_JD-dep_JD
dfpl2_vinf_in[dep_JD][arr_JD] = vinf_pl2_in
dfpl2_vinf_out[dep_JD][arr_JD] = vinf_pl2_out
dfpl3_tof[arr_JD][arr2_JD] = arr2_JD-arr_JD
dfpl3_vinf_in[arr_JD][arr2_JD] = vinf_pl3_in
dfpl3_rp[arr_JD][arr2_JD] = rp
return [dfpl1_c3, dfpl2_tof, dfpl2_vinf_in, dfpl2_vinf_out, dfpl3_tof, dfpl3_vinf_in, dfpl3_rp]
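# Usage sketch (hypothetical dates and constraint values): a coarse earth->jupiter->pluto
# search that keeps only solutions within the launch c3, flyby delta-vinf, flyby radius,
# and arrival vinf limits.
# results = run_pcp_search(2457754.5, 2457850.5, 2458300.5, 2458700.5, 2462500.5, 2463500.5,
#                          dpl='earth', pl2='jupiter', pl3='pluto', center='sun',
#                          c3_max=180, vinf_max=14.5, vinf_tol=0.1, rp_min=2.0e6)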
def search_script_multi(dep_windows, planets, center, constraints, fine_search=False):
dep_windows_cal = []
arr_windows_cal = []
# departure and arrival dates
for window in dep_windows:
dep_windows_cal.append([pd.to_datetime(cal_from_jd(jd, rtn='string')) for jd in window])
# time windows
windows = []
searchint = 3
if fine_search:
searchint = 0.8
delta_deps = [depf - depi for depi, depf in dep_windows]
for delta, window in zip(delta_deps, dep_windows):
windows.append(np.linspace(window[0], window[1], int(delta/searchint)))
dfs = []
# generate dataframes for c3, time of flight, and dep/arrival v_inf
dfpl1_c3 = pd.DataFrame(index=windows[1], columns=windows[0])
dfpl2_tof = pd.DataFrame(index=windows[1], columns=windows[0])
dfpl2_vinf_in = pd.DataFrame(index=windows[1], columns=windows[0])
dfs = [dfpl1_c3, dfpl2_tof, dfpl2_vinf_in]
if len(planets) >= 3:
dfpl2_vinf_out = pd.DataFrame(index=windows[1], columns=windows[0])
dfpl2_rp = pd.DataFrame(index=windows[1], columns=windows[0])
dfpl3_tof = pd.DataFrame(index=windows[2], columns=windows[1])
dfpl3_vinf_in = pd.DataFrame(index=windows[2], columns=windows[1])
dfs = [dfpl1_c3, dfpl2_tof, dfpl2_vinf_in, dfpl2_vinf_out, dfpl2_rp,
dfpl3_tof, dfpl3_vinf_in]
if len(planets) >= 4:
dfpl3_vinf_out = pd.DataFrame(index=windows[2], columns=windows[1])
dfpl3_rp = pd.DataFrame(index=windows[2], columns=windows[1])
dfpl4_tof = pd.DataFrame(index=windows[3], columns=windows[2])
dfpl4_vinf_in = pd.DataFrame(index=windows[3], columns=windows[2])
dfs = [dfpl1_c3, dfpl2_tof, dfpl2_vinf_in, dfpl2_vinf_out, dfpl2_rp,
dfpl3_tof, dfpl3_vinf_in, dfpl3_vinf_out, dfpl3_rp,
dfpl4_tof, dfpl4_vinf_in]
if len(planets) >= 5:
dfpl4_vinf_out = pd.DataFrame(index=windows[3], columns=windows[2])
dfpl4_rp = pd.DataFrame(index=windows[3], columns=windows[2])
dfpl5_tof = pd.DataFrame(index=windows[4], columns=windows[3])
dfpl5_vinf_in = pd.DataFrame(index=windows[4], columns=windows[3])
dfs = [dfpl1_c3, dfpl2_tof, dfpl2_vinf_in, dfpl2_vinf_out, dfpl2_rp,
dfpl3_tof, dfpl3_vinf_in, dfpl3_vinf_out, dfpl3_rp,
dfpl4_tof, dfpl4_vinf_in, dfpl4_vinf_out, dfpl4_rp,
dfpl5_tof, dfpl5_vinf_in]
if len(planets) == 6:
dfpl5_vinf_out =
|
pd.DataFrame(index=windows[4], columns=windows[3])
|
pandas.DataFrame
|
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
from rayml.model_family import ModelFamily
from rayml.pipelines.components import ARIMARegressor
from rayml.problem_types import ProblemTypes
pytestmark = [
pytest.mark.noncore_dependency,
pytest.mark.skip_during_conda,
pytest.mark.skip_if_39,
]
@pytest.fixture(scope="module")
def sktime_arima():
from sktime.forecasting import arima as sktime_arima
return sktime_arima
@pytest.fixture(scope="module")
def forecasting():
from sktime.forecasting import base as forecasting
return forecasting
def test_model_family():
assert ARIMARegressor.model_family == ModelFamily.ARIMA
def test_problem_types():
assert set(ARIMARegressor.supported_problem_types) == {
ProblemTypes.TIME_SERIES_REGRESSION
}
def test_model_instance(ts_data):
X, y = ts_data
clf = ARIMARegressor()
fitted = clf.fit(X, y)
assert isinstance(fitted, ARIMARegressor)
def test_fit_ts_without_y(ts_data):
X, y = ts_data
clf = ARIMARegressor()
with pytest.raises(ValueError, match="ARIMA Regressor requires y as input."):
clf.fit(X=X)
@pytest.mark.parametrize("train_features_index_dt", [True, False])
@pytest.mark.parametrize("train_target_index_dt", [True, False])
@pytest.mark.parametrize(
"train_none, no_features, datetime_feature",
[
(True, False, False),
(False, True, False),
(False, False, True),
(False, False, False),
],
)
def test_remove_datetime(
train_features_index_dt,
train_target_index_dt,
train_none,
datetime_feature,
no_features,
get_ts_X_y,
):
X_train, _, y_train = get_ts_X_y(
train_features_index_dt,
train_target_index_dt,
train_none,
datetime_feature,
no_features,
test_features_index_dt=False,
)
if not train_none:
if train_features_index_dt:
assert isinstance(X_train.index, pd.DatetimeIndex)
else:
assert not isinstance(X_train.index, pd.DatetimeIndex)
if datetime_feature:
assert X_train.select_dtypes(include=["datetime64"]).shape[1] == 1
if train_target_index_dt:
assert isinstance(y_train.index, pd.DatetimeIndex)
else:
assert not isinstance(y_train.index, pd.DatetimeIndex)
clf = ARIMARegressor()
X_train_no_dt = clf._remove_datetime(X_train, features=True)
y_train_no_dt = clf._remove_datetime(y_train)
if train_none:
assert X_train_no_dt is None
else:
assert not isinstance(X_train_no_dt.index, pd.DatetimeIndex)
if no_features:
assert X_train_no_dt.shape[1] == 0
if datetime_feature:
assert X_train_no_dt.select_dtypes(include=["datetime64"]).shape[1] == 0
assert not isinstance(y_train_no_dt.index, pd.DatetimeIndex)
def test_match_indices(get_ts_X_y):
X_train, _, y_train = get_ts_X_y(
train_features_index_dt=False,
train_target_index_dt=False,
train_none=False,
datetime_feature=False,
no_features=False,
test_features_index_dt=False,
)
assert not X_train.index.equals(y_train.index)
clf = ARIMARegressor()
X_, y_ = clf._match_indices(X_train, y_train)
assert X_.index.equals(y_.index)
def test_set_forecast(get_ts_X_y):
from sktime.forecasting.base import ForecastingHorizon
_, X_test, _ = get_ts_X_y(
train_features_index_dt=False,
train_target_index_dt=False,
train_none=False,
datetime_feature=False,
no_features=False,
test_features_index_dt=False,
)
clf = ARIMARegressor()
fh_ = clf._set_forecast(X_test)
assert isinstance(fh_, ForecastingHorizon)
assert len(fh_) == len(X_test)
assert fh_.is_relative
def test_feature_importance(ts_data):
X, y = ts_data
clf = ARIMARegressor()
with patch.object(clf, "_component_obj"):
clf.fit(X, y)
assert clf.feature_importance == np.zeros(1)
@pytest.mark.parametrize(
"train_none, train_features_index_dt, "
"train_target_index_dt, no_features, "
"datetime_feature, test_features_index_dt",
[
(True, False, False, False, False, False),
(False, True, True, False, False, True),
(False, True, True, False, False, False),
],
)
def test_fit_predict(
train_features_index_dt,
train_target_index_dt,
train_none,
no_features,
datetime_feature,
test_features_index_dt,
get_ts_X_y,
):
from sktime.forecasting.arima import AutoARIMA
from sktime.forecasting.base import ForecastingHorizon
X_train, X_test, y_train = get_ts_X_y(
train_features_index_dt,
train_target_index_dt,
train_none,
datetime_feature,
no_features,
test_features_index_dt,
)
fh_ = ForecastingHorizon([i + 1 for i in range(len(X_test))], is_relative=True)
a_clf = AutoARIMA()
clf = a_clf.fit(X=X_train, y=y_train)
y_pred_sk = clf.predict(fh=fh_, X=X_test)
m_clf = ARIMARegressor(d=None)
m_clf.fit(X=X_train, y=y_train)
y_pred = m_clf.predict(X=X_test)
assert (y_pred_sk.values == y_pred.values).all()
assert y_pred.index.equals(X_test.index)
@pytest.mark.parametrize(
"train_none, train_features_index_dt, "
"train_target_index_dt, no_features, "
"datetime_feature, test_features_index_dt",
[
(False, False, False, False, False, False),
(False, True, False, False, False, True),
(False, False, True, False, True, False),
(False, False, True, True, False, False),
(False, True, True, True, False, False),
(False, True, True, False, True, False),
],
)
def test_fit_predict_sk_failure(
train_features_index_dt,
train_target_index_dt,
train_none,
no_features,
datetime_feature,
test_features_index_dt,
get_ts_X_y,
):
from sktime.forecasting.arima import AutoARIMA
X_train, X_test, y_train = get_ts_X_y(
train_features_index_dt,
train_target_index_dt,
train_none,
datetime_feature,
no_features,
test_features_index_dt,
)
a_clf = AutoARIMA()
with pytest.raises(Exception):
a_clf.fit(X=X_train, y=y_train)
m_clf = ARIMARegressor(d=None)
m_clf.fit(X=X_train, y=y_train)
y_pred = m_clf.predict(X=X_test)
assert isinstance(y_pred, pd.Series)
assert len(y_pred) == 10
assert y_pred.index.equals(X_test.index)
@pytest.mark.parametrize("freq_num", ["1", "2"])
@pytest.mark.parametrize("freq_str", ["T", "M", "Y"])
def test_different_time_units_out_of_sample(
freq_str, freq_num, sktime_arima, forecasting
):
from sktime.forecasting.arima import AutoARIMA
from sktime.forecasting.base import ForecastingHorizon
datetime_ = pd.date_range("1/1/1870", periods=20, freq=freq_num + freq_str)
X = pd.DataFrame(range(20), index=datetime_)
y = pd.Series(np.sin(np.linspace(-8 * np.pi, 8 * np.pi, 20)), index=datetime_)
fh_ = ForecastingHorizon([i + 1 for i in range(len(y[15:]))], is_relative=True)
a_clf = AutoARIMA()
clf = a_clf.fit(X=X[:15], y=y[:15])
y_pred_sk = clf.predict(fh=fh_, X=X[15:])
m_clf = ARIMARegressor(d=None)
m_clf.fit(X=X[:15], y=y[:15])
y_pred = m_clf.predict(X=X[15:])
assert (y_pred_sk.values == y_pred.values).all()
assert y_pred.index.equals(X[15:].index)
def test_arima_supports_boolean_features():
X = pd.DataFrame({"dates":
|
pd.date_range("2021-01-01", periods=10)
|
pandas.date_range
|
import argparse
import math
import os
from multiprocessing import Pool
import cv2
import pandas as pd
from collections import defaultdict
from pytorch_toolbelt.utils import fs
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
def main():
methods = ["Cover", "JMiPOD", "JUNIWARD", "UERD"]
errors =
|
pd.read_csv("errors.csv")
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
data = pd.read_csv('data/API_UKR_DS2_en_csv_v2_2060461.csv')
data.drop(['Unnamed: 65', 'Country Name', 'Country Code'], axis=1, inplace=True)
gdp_dict = {'years' : data.drop(['Indicator Name', 'Indicator Code'], axis=1).columns,
'value' : data[data['Indicator Code'] == 'NY.GDP.MKTP.CD'].drop(['Indicator Name', 'Indicator Code'], axis=1).to_numpy()[0]}
pd_gdp = pd.DataFrame(gdp_dict)
fig_gdp = go.Figure()
fig_gdp.add_trace(go.Scatter(x=pd_gdp.iloc[26:].years,
y=pd_gdp.iloc[26:].value,
mode='lines+markers',
name='Gaps',
hovertemplate='Ukraine (%{x})'+'<br>%{y}<extra></extra>',
line=dict(
width=2,
color='rgb(101, 145, 212)'
)))
fig_gdp.update_layout(
xaxis=dict(
fixedrange=True,
showline=False,
showgrid=False,
showticklabels=True,
zeroline=False,
ticks='outside',
tickangle=360,
nticks=6,
tickwidth=1,
tickfont=dict(
family='Arial',
size=11,
color='rgb(82, 82, 82)',
)
),
yaxis=dict(
gridcolor='rgb(150, 150, 150)',
fixedrange=True,
showgrid=True,
gridwidth=0.5,
zeroline=False,
showline=False,
showticklabels=True,
tickfont=dict(
family='Arial',
size=11,
color='rgb(82, 82, 82)',
),
),
title={
'text' : "GDP (current US$)",
},
font=dict(
size = 10,
),
width=350, height=350,
hoverlabel=dict(
bgcolor="rgb(200, 200, 200)",
font_size=16,
font_family="Arial"
),
autosize=False,
showlegend=False,
plot_bgcolor='white',
# margin=dict(l=500, r=20, t=50, b=20)
)
pop_dict = {'years' : data.drop(['Indicator Name', 'Indicator Code'], axis=1).columns,
'value' : data[data['Indicator Code'] == 'SP.POP.TOTL'].drop(['Indicator Name', 'Indicator Code'], axis=1).to_numpy()[0]}
pop_dict = pd.DataFrame(pop_dict)
fig_pop = go.Figure()
fig_pop.add_trace(go.Scatter(x=pop_dict.years,
y=pop_dict.value,
mode='lines+markers',
hovertemplate='Ukraine (%{x})'+'<br>%{y}<extra></extra>',
line=dict(
width=2,
color='rgb(101, 145, 212)'
)))
fig_pop.update_layout(
xaxis=dict(
fixedrange=True,
showline=False,
showgrid=False,
showticklabels=True,
zeroline=False,
ticks='outside',
tickangle=360,
nticks=8,
tickwidth=1,
tickfont=dict(
family='Arial',
size=8,
color='rgb(82, 82, 82)',
)
),
yaxis=dict(
gridcolor='rgb(150, 150, 150)',
fixedrange=True,
showgrid=True,
gridwidth=0.5,
zeroline=False,
showline=False,
showticklabels=True,
tickfont=dict(
family='Arial',
size=11,
color='rgb(82, 82, 82)',
),
),
title={
'text' : "Population, total",
},
font=dict(
size = 10,
),
hoverlabel=dict(
bgcolor="rgb(200, 200, 200)",
font_size=16,
font_family="Arial"
),
width=350, height=350,
autosize=False,
showlegend=False,
plot_bgcolor='white',
# margin=dict(
# autoexpand=False,
# l=500,
# r=20,
# t=110,
# ),
)
school_dict = {'years' : data.drop(['Indicator Name', 'Indicator Code'], axis=1).columns,
'value' : data[data['Indicator Code'] == 'SE.PRM.ENRR'].drop(['Indicator Name', 'Indicator Code'], axis=1).to_numpy()[0]}
school_dict = pd.DataFrame(school_dict)
fig_school = go.Figure()
fig_school.add_trace(go.Scatter(x=school_dict.iloc[11:55].years,
y=school_dict.iloc[11:55].value,
mode='lines+markers',
hovertemplate='Ukraine (%{x})'+'<br>%{y}<extra></extra>',
# texttemplate="Ukraine (%{x}) <br>(%{y})",
line=dict(
width=2,
color='rgb(101, 145, 212)'
)))
fig_school.update_layout(
xaxis=dict(
fixedrange=True,
showline=False,
showgrid=False,
showticklabels=True,
zeroline=False,
ticks='outside',
tickangle=360,
nticks=8,
tickwidth=1,
tickfont=dict(
family='Arial',
size=11,
color='rgb(82, 82, 82)',
)
),
yaxis=dict(
gridcolor='rgb(150, 150, 150)',
fixedrange=True,
showgrid=True,
gridwidth=0.5,
zeroline=False,
showline=False,
showticklabels=True,
tickfont=dict(
family='Arial',
size=11,
color='rgb(82, 82, 82)',
),
),
title={
'text' : "School enrollment, primary (% gross)",
},
font=dict(
size = 10,
),
hoverlabel=dict(
bgcolor="rgb(200, 200, 200)",
font_size=16,
font_family="Arial"
),
width=350, height=350,
autosize=False,
showlegend=False,
plot_bgcolor='white',
# margin=dict(
# autoexpand=False,
# l=500,
# r=20,
# t=110,
# ),
)
co_dict = {'years' : data.drop(['Indicator Name', 'Indicator Code'], axis=1).columns,
'value' : data[data['Indicator Code'] == 'EN.ATM.CO2E.PC'].drop(['Indicator Name', 'Indicator Code'], axis=1).to_numpy()[0]}
co_dict = pd.DataFrame(co_dict)
fig_co = go.Figure()
fig_co.add_trace(go.Scatter(x=co_dict.iloc[32:57].years,
y=co_dict.iloc[32:57].value,
mode='lines+markers',
hovertemplate='Ukraine (%{x})'+'<br>%{y}<extra></extra>',
# texttemplate="Ukraine (%{x}) <br>(%{y})",
line=dict(
width=2,
color='rgb(101, 145, 212)'
)))
fig_co.update_layout(
xaxis=dict(
fixedrange=True,
showline=False,
showgrid=False,
showticklabels=True,
zeroline=False,
ticks='outside',
tickangle=360,
nticks=7,
tickwidth=1,
tickfont=dict(
family='Arial',
size=11,
color='rgb(82, 82, 82)',
)
),
yaxis=dict(
gridcolor='rgb(150, 150, 150)',
fixedrange=True,
showgrid=True,
gridwidth=0.5,
zeroline=False,
showline=False,
showticklabels=True,
tickfont=dict(
family='Arial',
size=11,
color='rgb(82, 82, 82)',
),
),
title={
'text' : "CO2 emissions (metric tons per capita)",
},
font=dict(
size = 10,
),
hoverlabel=dict(
bgcolor="rgb(200, 200, 200)",
font_size=16,
font_family="Arial"
),
width=350, height=350,
autosize=False,
showlegend=False,
plot_bgcolor='white',
# margin=dict(
# autoexpand=False,
# l=500,
# r=20,
# t=110,
# ),
)
poverty_dict = {'years' : data.drop(['Indicator Name', 'Indicator Code'], axis=1).columns,
'value' : data[data['Indicator Code'] == 'SI.POV.NAHC'].drop(['Indicator Name', 'Indicator Code'], axis=1).to_numpy()[0]}
poverty_dict = pd.DataFrame(poverty_dict)
fig_poverty = go.Figure()
fig_poverty.add_trace(go.Scatter(x=poverty_dict.iloc[41:59].years,
y=poverty_dict.iloc[41:59].value,
mode='lines+markers',
hovertemplate='Ukraine (%{x})'+'<br>%{y}<extra></extra>',
# texttemplate="Ukraine (%{x}) <br>(%{y})",
line=dict(
width=2,
color='rgb(101, 145, 212)'
)))
fig_poverty.update_layout(
xaxis=dict(
fixedrange=True,
showline=False,
showgrid=False,
showticklabels=True,
zeroline=False,
ticks='outside',
tickangle=360,
nticks=8,
tickwidth=1,
tickfont=dict(
family='Arial',
size=11,
color='rgb(82, 82, 82)',
)
),
yaxis=dict(
gridcolor='rgb(150, 150, 150)',
fixedrange=True,
showgrid=True,
gridwidth=0.5,
zeroline=False,
showline=False,
showticklabels=True,
tickfont=dict(
family='Arial',
size=11,
color='rgb(82, 82, 82)',
),
),
title={
'text' : "Poverty headcount ratio (% of population)",
},
font=dict(
size = 10,
),
hoverlabel=dict(
bgcolor="rgb(200, 200, 200)",
font_size=16,
font_family="Arial"
),
width=350, height=350,
autosize=False,
showlegend=False,
plot_bgcolor='white',
# margin=dict(
# autoexpand=False,
# l=500,
# r=20,
# t=110,
# ),
)
life_dict = {'years' : data.drop(['Indicator Name', 'Indicator Code'], axis=1).columns,
'value' : data[data['Indicator Code'] == 'SP.DYN.LE00.IN'].drop(['Indicator Name', 'Indicator Code'], axis=1).to_numpy()[0]}
life_dict = pd.DataFrame(life_dict)
fig_life = go.Figure()
fig_life.add_trace(go.Scatter(x=life_dict.iloc[:60].years,
y=life_dict.iloc[:60].value,
mode='lines+markers',
hovertemplate='Ukraine (%{x})'+'<br>%{y}<extra></extra>',
# texttemplate="Ukraine (%{x}) <br>(%{y})",
line=dict(
width=2,
color='rgb(101, 145, 212)'
)))
fig_life.update_layout(
xaxis=dict(
fixedrange=True,
showline=False,
showgrid=False,
showticklabels=True,
zeroline=False,
ticks='outside',
tickangle=360,
nticks=7,
tickwidth=1,
tickfont=dict(
family='Arial',
size=11,
color='rgb(82, 82, 82)',
)
),
yaxis=dict(
gridcolor='rgb(150, 150, 150)',
fixedrange=True,
showgrid=True,
gridwidth=0.5,
zeroline=False,
showline=False,
showticklabels=True,
tickfont=dict(
family='Arial',
size=11,
color='rgb(82, 82, 82)',
),
),
title={
'text' : "Life expectancy at birth, total (years)",
},
font=dict(
size = 10,
),
hoverlabel=dict(
bgcolor="rgb(200, 200, 200)",
font_size=16,
font_family="Arial"
),
width=350, height=350,
autosize=False,
showlegend=False,
plot_bgcolor='white',
# margin=dict(
# autoexpand=False,
# l=500,
# r=20,
# t=110,
# ),
)
gni_dict = {'years' : data.drop(['Indicator Name', 'Indicator Code'], axis=1).columns,
'value' : data[data['Indicator Code'] == 'NY.GNP.PCAP.CD'].drop(['Indicator Name', 'Indicator Code'], axis=1).to_numpy()[0]}
gni_dict =
|
pd.DataFrame(gni_dict)
|
pandas.DataFrame
|
#!/usr/bin/env python3
"""Converts otu tables from diatom analysis into a suitable format for uploading to BIOSYS.
Additionally it will create an "otus_all.xlsx" file for community analysis.
If extraction sheets are provided in an appropriate format then it will also calculate
the most similar sample and provide the Baroni–Urbani–Buser coefficient similarity
between the two samples."""
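# Note on the similarity measure (a sketch of the definition, matching the code below):
# for two samples reduced to presence/absence data, with a = species present in both,
# b / c = species present in only one sample, and d = species absent from both,
# the Baroni-Urbani-Buser coefficient is
#   BUB = (sqrt(a*d) + a) / (sqrt(a*d) + a + b + c)
# which is what baroni_urbani_buser_coefficient() computes.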
import sys
import os
import glob
import argparse
import pandas as pd
import send2trash
import re
from math import sqrt
import statistics as stat
from metadata import *
#Class Objects:
class Biosys_Version:
"""Stores and reports information about the current version of biosys.py."""
version = ("\033[1;34m" + "\nBiosys Version: 2019.10" + "\033[0m")
    known_issues = '''\033[93mThis program is still a work in progress \033[0m
    current issues:
        -Multicore processing not supported.
        -Baroni-Urbani-Buser similarity not yet fully functional because:
            -It takes too long to run on the entire dataset (could be made optional).
            -The cut-off for distance has not been determined.
            -Similarity doesn't take repeat samples into account.
    '''
messages = [version,known_issues]
class FormatError(Exception):
    '''Formatting of file is incompatible with this program.'''
pass
class MathsError(Exception):
    '''A Maths or Logic error has occurred in the code.'''
pass
class Diatom_Sample:
"""A slice of an OTU table, and associated metadata for a diatom sample."""
#this imports data from the table from Tim
def __init__(self, sampleid, siteid, area, region, prn, sitename, sampledate, barcode, folder):
if folder:
try:
self.folder = str(int(folder))
except ValueError:
self.folder = str(folder)
else:
self.folder = no_value
if sampleid:
try:
self.sampleid = str(int(str(sampleid).replace(".","")))
except ValueError:
self.sampleid = str(sampleid)
else:
self.sampleid = "F" + str(self.folder)
if siteid:
self.siteid = str(int(str(siteid).replace(".","")))
else:
self.siteid = no_value
if area:
self.area = get_capital(str(area))
else:
self.area = no_value
if region:
self.region = get_capital(str(region))
if region == "nan":
print("Sample " + self.folder + " has no region, check input metadata file.")
self.region = no_value
else:
self.region = no_value
        if self.region == no_value or self.area == no_value:
pass
else:
self.reg_area = get_initials(region) + "-" + get_capital(area)
if prn:
try:
self.prn = int(prn)
except ValueError:
self.prn = prn
else:
self.prn = no_value
if sitename:
try:
regex = re.compile('[^a-zA-Z ]')
self.sitename = regex.sub('', sitename)
except AttributeError:
self.sitename = no_value
except TypeError:
self.sitename = no_value
else:
self.sitename = no_value
if sampledate:
self.sampledate = sampledate
else:
self.sampledate = no_value
if barcode:
self.barcode = barcode
else:
self.barcode = no_value
        #sets these values to defaults just so if they aren't added later they still have a value
self.batch_num = no_value
self.count = 0
self.pass_fail = "Unsuccessful"
self.analysis_date = no_value
self.otu_tab = None
self.sim = 0
self.sim_sample = no_value
self.note = ""
self.plate_loc = no_value
def assign_results(self, otus, batch_num):
'''Assigns otu table results to the sample.'''
self.otu_tab = otus
try:
count = self.otu_tab[str(self.folder)].sum()
self.count = count
except KeyError:
self.count = 0
print("Seq count for " + str(self.folder) + " has been set to 0.")
if self.count >= 3000:
self.pass_fail = "Successful"
if batch_num:
self.batch_num = str(batch_num).split(".")[0]
try:
date = batch_num_dict[self.batch_num]
except KeyError:
date = "Run metadata has not been set"
print(date + " for sample: " + str(self.folder) + " " + str(self.batch_num))
self.analysis_date = date
else:
self.batch_num = no_value
self.analysis_date = no_value
def set_analysis_date(self):
'''Sets the date of analysis to the date of the MiSeq run.'''
if self.batch_num == no_value:
self.analysis_date = no_value
else:
try:
date = batch_num_dict[self.batch_num]
except KeyError:
date = "Run metadata has not been set"
print(date + " for sample: " + str(self.folder) + " " + str(self.batch_num))
self.analysis_date = date
def sort_control(self):
if self.region == "Control":
s_loc = str(self.folder).rfind("S")
if s_loc == -1:
self.folder = self.folder
else:
self.folder = str(self.folder)[0:s_loc]
self.sampleid = self.folder + "_" + str(self.batch_num)
if self.folder.lower()[0] == "b":
self.region = "Blanks"
elif self.folder.lower()[0] == "n":
self.region = "NTCs"
elif self.folder.lower()[0] == "p":
self.region = "Positives"
elif self.folder.lower()[0] == "g":
self.region = "Gblocks"
elif self.folder.lower()[0] == "t":
self.region = "TR"
else:
self.region = "Unknowns"
self.sampleid = "F" + str(self.folder)
self.folder = self.folder.upper()
def assign_surrounding_samples(self, sur_coords, row, col, sheet_name):
'''Assigns the plate, coordinate, and surrounding coordinates for similaity analysis.'''
if sur_coords:
self.sur_samples = sur_coords
if row and col:
self.plate_loc = [row,col]
if sheet_name:
self.plate = str(sheet_name)
def assign_most_sim_sample(self, sim, sample):
'''Assigns the most similar surrounding sample and the similarity.'''
self.sim = sim
self.sim_sample = sample
def amend_sample_note(self, note):
if self.note == "":
self.note = note
else:
self.note = self.note + "," + note
#Global functions:
def community_analysis_export(samples_otus, keep_list, control_regions):
otus_all_writer = pd.ExcelWriter("otus_all.xlsx")
print("Exporting all otus for community analysis")
if keep_list[0] == "all":
keep_list = ["Anglian", "Midlands", "South West",
"Southern", "North West", "North East",
"Thames", "Unknowns", "Blanks",
"Positives", "Gblocks", "NTCs",
"TR", "Aberdeen", "Perth",
"Eurocentrl", "Dingwall", "Dumfries",
"Galashiels", "Bowlblank" ]
for sample in samples_otus:
if sample.count >= 1:
if sample.region in keep_list:
if sample.region in control_regions:
try:
sample.otu_tab.columns = ["PrefTaxon", sample.sampleid]
interim_df = sample.otu_tab
main_df = pd.merge(main_df, interim_df, on="PrefTaxon")
except NameError:
main_df = sample.otu_tab
else:
try:
interim_df = sample.otu_tab
main_df = pd.merge(main_df, interim_df, on="PrefTaxon")
except NameError:
main_df = sample.otu_tab
df = format_df(main_df)
df = df.transpose()
df.to_excel(otus_all_writer, sheet_name="comunity_analysis", header=True, index=True)
otus_all_writer.save()
def add_biosys_headers(df, biosys_dict):
biosys_df = pd.DataFrame.from_dict(biosys_dict, orient='columns')
biosys_df = biosys_df.rename(index={0:'siteid',1:'sampleid'})
df = pd.concat([biosys_df, df], sort=True)
return df
def format_df(df):
df = df[~df.PrefTaxon.str.contains("batch_num")]
df = df.set_index(list(df)[0])
df = df.loc[(df!=0).any(axis=1)]
return df
def filter_otus_by_region(region, samples_otus, writer, control_regions):
print(region)
if region == no_value:
no_reg = open("samples_with_no_region_values.text", "w")
no_reg.write("Region for the below samples is " + region + "\n")
no_reg.write("Folder_id\tCounts\tSample_id\tSite_id\tPRN\n")
for sample in metadata_list:
if sample.region == region:
                no_reg.write(sample.folder + "\t" + str(sample.count) + "\t" + sample.sampleid + "\t" + sample.siteid + "\t" + str(sample.prn) + "\n")
no_reg.close()
elif region == "TR":
biosys_siteid_dict = {}
for sample_tr in samples_otus:
if sample_tr.region == "TR":
if sample_tr.count >= 3000:
if sample_tr.folder[2] == "3":
sample_original_fn = sample_tr.folder[2:8]
elif sample_tr.folder[2] == "4":
sample_original_fn = sample_tr.folder[2:9]
else:
                        sample_original_fn = sample_tr.folder
try:
interim_df = sample_tr.otu_tab
main_df = pd.merge(main_df, interim_df, on="PrefTaxon")
except NameError:
main_df = sample_tr.otu_tab
for sample_og in samples_otus:
if sample_og.folder == sample_original_fn:
if sample_og.region != region:
if sample_og.count > 1:
interim_df = sample_og.otu_tab
main_df = pd.merge(main_df, interim_df, on="PrefTaxon")
try:
df = format_df(main_df)
df = add_biosys_headers(df, biosys_siteid_dict)
df.to_excel(writer, sheet_name=region, header=True, index=True)
except UnboundLocalError:
print(" Region " + region + " had no passing samples.")
elif region in control_regions:
for sample in samples_otus:
if sample.region == region:
if sample.count >= 3000:
try:
interim_df = sample.otu_tab
main_df = pd.merge(main_df, interim_df, on="PrefTaxon")
except NameError:
main_df = sample.otu_tab
try:
df = format_df(main_df)
df.to_excel(writer, sheet_name=region, header=True, index=True)
except UnboundLocalError:
print(" Region " + region + " had no passing samples.")
else:
biosys_siteid_dict = {}
for sample in samples_otus:
if sample.region == region:
if sample.count >= 3000:
biosys_siteid_dict[sample.folder] = [str(sample.siteid), str(sample.sampleid)]
try:
interim_df = sample.otu_tab
main_df = pd.merge(main_df, interim_df, on="PrefTaxon")
except NameError:
main_df = sample.otu_tab
try:
df = format_df(main_df)
df = add_biosys_headers(df, biosys_siteid_dict)
df.to_excel(writer, sheet_name=region, header=True, index=True)
except UnboundLocalError:
print(" Region " + region + " had no passing samples.")
def get_region_list(sample_list):
regions_all = []
for sample in sample_list:
regions_all.append(sample.region)
region_unique = []
for region in regions_all:
if region not in region_unique:
region_unique.append(region)
region_unique = [x for x in region_unique if str(x) != 'nan']
return region_unique
def delete_file(file_in):
file_exists = os.path.isfile(file_in)
if file_exists == True:
send2trash.send2trash(file_in)
def save_sample_info(sample_list, writer, control_regions):
interim = "inter.text"
delete_file(interim)
file_interim = open(interim, "w")
header = ("SampleID,SiteID,Region,Area,PRN,Site Name,Sample Date,Barcode,Date of analysis,"
"BatchNum,Sequence Counts,Pass/Fail,FolderNumber,Notes\n")
file_interim.write(header)
for sample in sample_list:
if sample.region in control_regions:
pass
else:
line = ( str(sample.sampleid) + "," + str(sample.siteid) + "," + str(sample.region) + "," +
str(sample.area) + "," + str(sample.prn) + "," + str(sample.sitename) + "," +
str(sample.sampledate) + "," + str(sample.barcode) + "," +
str(sample.analysis_date) + "," + str(sample.batch_num) + "," +
str(sample.count) + "," + str(sample.pass_fail) + "," +
str(sample.folder) + "," + str(sample.note) + "\n" )
file_interim.write(line)
file_interim.close()
sample_info_df = pd.read_csv(interim, delimiter = ",")
sample_info_df.to_excel(writer, sheet_name="Sample batch information", header=True, index=False)
def baroni_urbani_buser_coefficient(df):
a = 0 #Num species present in both samples
b = 0 #Num species present in only sample 0
c = 0 #Num species present in only sample 1
d = 0 #Num species absent in both samples
df = df.drop(columns="PrefTaxon")
for row_num in range(0, df.shape[0]):
#col 0 is sample 1, col 1 is sample 2
#Create binary data
count_0 = df.iloc[row_num][0]
if count_0 > 1:
count_0 = 1
count_1 = df.iloc[row_num][1]
if count_1 > 1:
count_1 = 1
#compare binary data
if count_0 == count_1:
if count_0 == 1:
a=a+1
else:
d=d+1
elif count_0 > count_1:
b=b+1
elif count_0 < count_1:
c=c+1
else:
MathsError
#calculate coefficient
bub = float((sqrt(a*d) + a) / (sqrt(a*d) + a + b + c))
return bub
#dif
dif = {"PrefTaxon":["EC","IE"],"A":[1,0],"B":[0,1]}
same = {"PrefTaxon":["EC","IE"],"A":[1,0],"B":[1,0]}
assert baroni_urbani_buser_coefficient(pd.DataFrame(data=dif)) == 0
assert baroni_urbani_buser_coefficient(pd.DataFrame(data=same)) == 1
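# Worked example (illustrative, same style as the checks above): the two samples share
# one species (a=1), one extra species appears only in sample A (b=1, c=0) and no
# species is absent from both (d=0), so
#   BUB = (sqrt(1*0) + 1) / (sqrt(1*0) + 1 + 1 + 0) = 0.5
half = {"PrefTaxon":["EC","IE"],"A":[1,1],"B":[1,0]}
assert baroni_urbani_buser_coefficient(pd.DataFrame(data=half)) == 0.5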
def perform_similarity_checks(sample_list, writer):
print("Performing Baroni–Urbani–Buser coefficient similarity checks...")
sim_all = []
for sample_cent in sample_list:
status = sample_cent.folder
count = sample_list.index(sample_cent)
filled_len = int(round(60 * count / float(len(sample_list))))
percents = round(100.0 * count / float(len(sample_list)), 5)
bar = '=' * filled_len + '-' * (60 - filled_len)
sys.stdout.write('[%s] %s%s Analysing sample: %s\r' % (bar, percents, '%', status))
sys.stdout.flush()
cent_df = sample_cent.otu_tab
if sample_cent.pass_fail != "Successful":
continue
highest_sim = 0.0
most_sim_sample = ""
for sample_sur in sample_list:
if sample_cent == sample_sur:
pass
else:
if sample_sur.pass_fail != "Successful":
continue
try:
if sample_cent.plate != sample_sur.plate:
pass
else:
if sample_sur.plate_loc in sample_cent.sur_samples:
sur_df = sample_sur.otu_tab
if not isinstance(sur_df, pd.DataFrame):
continue
df = pd.merge(cent_df, sur_df, on="PrefTaxon")
similarity = baroni_urbani_buser_coefficient(df)
sim_all.append(similarity)
if similarity > highest_sim:
highest_sim = similarity
most_sim_sample = sample_sur.folder
else:
sur_df = sample_sur.otu_tab
if not isinstance(sur_df, pd.DataFrame):
continue
df = pd.merge(cent_df, sur_df, on="PrefTaxon")
similarity = baroni_urbani_buser_coefficient(df)
sim_all.append(similarity)
except AttributeError:
pass
sample_cent.assign_most_sim_sample(highest_sim, most_sim_sample)
print("\n")
folder_nums = []
sim_samps = []
bub_cos = []
plates = []
plate_locs = []
for sample in sample_list:
folder_nums.append(sample.folder)
sim_samps.append(sample.sim_sample)
bub_cos.append(sample.sim)
plate_locs.append(sample.plate_loc)
try:
plates.append(sample.plate)
except AttributeError:
plates.append(" ")
print("Mean similarity of all samples: " + str(stat.mean(sim_all)))
print("Standard deviationm of similarity of all samples: " + str(stat.stdev(sim_all)))
sim_dict = {"FolderNumber":folder_nums,"Most Similar Sample":sim_samps, "BUB_Co":bub_cos, "Plate":plates}
sim_df = pd.DataFrame.from_dict(sim_dict)
sim_df.to_excel(writer, sheet_name="BUB_coefficient", header=True, index=False)
def get_surrounding_coords(row, col):
if row == 0:
surround_rows = [row, row+1]
elif row == 7:
surround_rows = [row-1 ,row]
else:
surround_rows = [row-1,row,row+1]
if col == 1:
surround_cols = [col,col+1]
elif col == 12:
surround_cols = [col-1,col]
else:
surround_cols = [col-1,col,col+1]
sur_coords = []
for sur_row in surround_rows:
for sur_col in surround_cols:
sur_coords.append([sur_row,sur_col])
return sur_coords
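# Illustrative check (not from the original script): for the top-left well (row 0, col 1)
# the neighbourhood is the 2x2 block containing the well itself and its in-plate neighbours.
assert get_surrounding_coords(0, 1) == [[0, 1], [0, 2], [1, 1], [1, 2]]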
def import_extraction_sheets(data_dir, xl_file, samples):
print("Importing extraction sheets...")
dir_abspath = os.path.abspath(data_dir)
xl_abspath = os.path.join(dir_abspath, xl_file)
xl = pd.ExcelFile(xl_abspath)
sheet_names = xl.sheet_names
sample_groups = {}
for sheet_name in sheet_names:
sheet = xl.parse(sheet_name=sheet_name)
if sheet.empty:
pass
else:
sheet = sheet.set_index(list(sheet.columns.values)[0])
plate = sheet[["Barcode Loc", "Sample ID"]].copy().head(96)
plate_samples = plate["Sample ID"].tolist()
new_plate_samples = []
for item in plate_samples:
new_item = str(item)
new_plate_samples.append(new_item)
for sample in samples:
if sample.folder in new_plate_samples:
row_num = new_plate_samples.index(sample.folder)
barcode_location = plate.iat[row_num, 0]
row_letters = ["A","B","C","D","E","F","G","H"]
row = row_letters.index(barcode_location[0])
col = int(barcode_location[1])
sur_coords = get_surrounding_coords(row, col)
try:
if sample.plate:
sample.amend_sample_note("Sample also found on " + sheet_name)
except AttributeError:
sample.assign_surrounding_samples(sur_coords, row, col, sheet_name)
def import_otus(file_name):
otus = pd.read_csv(file_name, delimiter = "\t")
otus = otus.rename(index=str)
return otus
def import_otu_tables_main(directory, sample_list):
dir_abspath = os.path.abspath(directory)
file_paths = glob.glob(str(dir_abspath) + "/*.tsv")
files = []
for file_path in file_paths:
path,file_name = os.path.split(file_path)
files.append(file_name)
for file_name in files:
otus = import_otus(path + "/" + file_name)
headers = list(otus.columns.values)
headers.pop(0)
        new_headers = {} #changes made to accommodate formatting of re-demux
for header in headers:
try:
header_split = header.split(".")
if header_split[0][0].lower() in ["b","n","p","g","t","u"]:
new_header = header_split[0] + header_split[1]
new_header = str(new_header)
else:
new_header = header_split[0]
new_headers[header] = new_header
except IndexError:
print("\nHeader " + str(header) + " has been assumed to have been changed manually.")
new_headers[header] = header
otus = otus.rename(columns=new_headers)
headers = list(otus.columns.values)
headers.pop(0)
headers_added = []
for header in headers:
for sample in sample_list:
if header == str(sample.folder):
if sample.count > 3000:
headers_added.append(header)
else:
df = otus[["PrefTaxon"]].copy()
df[header] = otus[header].copy()
sample.assign_results(df, file_name)
headers_added.append(header)
for header in headers:
if header in headers_added:
pass
else:
sample = Diatom_Sample(None, None, None, "Control", None, None, None, None, header)
df = otus[["PrefTaxon"]].copy()
df[header] = otus[header].copy()
sample.assign_results(df, file_name)
sample.sort_control()
sample.otu_tab.columns = ["PrefTaxon", sample.sampleid]
sample_list.append(sample)
return sample_list
def get_initials(string):
xs = (string)
words_list = xs.split()
initials = ""
for word in words_list:
initials = initials + word[0].upper()
return initials
def get_capital(string):
xs = (string)
word_list = xs.split()
words_num = len(word_list)
words_return = ""
for word in word_list:
word_return = word[0].upper() + word[1:].lower()
words_return = words_return + word_return + " "
return words_return.strip()
def import_metadata_sepa(inputxl, directory):
dir_abspath = os.path.abspath(directory)
xl_abspath = os.path.join(directory, inputxl)
print("Metadata input file: " + xl_abspath)
xl = pd.ExcelFile(xl_abspath)
samples_reg = xl.parse(sheet_name=0)
samples_list = []
samples_info = samples_reg[["Region", "S_SAMPLING_PT_DESC", "SAMPLE_NUMBER", "SAMPLED_DATE"]]
samples_info.columns = ["region", "site_name", "sepa_num", "sample_date"]
for row in samples_info.itertuples():
try:
sepa_num = str(int(row[3]))
except ValueError:
sepa_num = str(row[3])
region = get_capital(str(row[1]))
site_name = str(row[2])
try:
sample_date = str(row[4])
except ValueError:
print("Value for SampleDate not accepted for sample: " + sepa_num + " orignal value: " + str(row[10]))
SampleDate = "01/01/18"
diatom_sample_info = Diatom_Sample(sepa_num, "1", "area", region, sepa_num, site_name, sample_date, sepa_num, sepa_num)
samples_list.append(diatom_sample_info)
return samples_list
def import_metadata_ea(inputxl, directory):
dir_abspath = os.path.abspath(directory)
xl_abspath = os.path.join(directory, inputxl)
print("Metadata input file: " + xl_abspath)
xl =
|
pd.ExcelFile(xl_abspath)
|
pandas.ExcelFile
|
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
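# A minimal pandas-only sketch of the case rejected above: pandas itself allows a
# rename that produces an index with mixed value types, which is exactly what cudf
# refuses with NotImplementedError. Illustrative helper only, not collected by pytest.
def _sketch_pandas_mixed_index_rename():
    pdf = pd.DataFrame({"a": [1, 2, 3]})
    renamed = pdf.rename(mapper={1: "x", 2: "y"}, axis=0)
    # The renamed index mixes ints and strings, so it falls back to object dtype.
    assert list(renamed.index) == [0, "x", "y"]
    assert renamed.index.dtype == object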
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
    # max_columns=0 lets pandas auto-detect the terminal width (hence the COLUMNS monkeypatch)
    pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
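# A minimal sketch of the invariant checked above, assuming the usual
# hash-partitioning rule partition = hash(key) % nparts (cudf's actual hash
# function is internal and differs): every occurrence of a key lands in the same
# partition, so the key sets of different partitions are disjoint.
# Illustrative helper only, not collected by pytest.
def _sketch_hash_partition_disjoint_keys():
    keys = [3, 1, 4, 1, 5, 9, 2, 6, 5, 3]
    nparts = 3
    parts = [[k for k in keys if hash(k) % nparts == p] for p in range(nparts)]
    key_sets = [set(p) for p in parts]
    for i in range(nparts):
        for j in range(i + 1, nparts):
            assert not (key_sets[i] & key_sets[j])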
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
    if (dtype1 != dtype2 and "datetime" in dtype1) or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # cudf.DataFrame.to_pandas() casts NaN to 0 in non-float numerical columns,
    # so upcast the numerical columns to float64 and restore NaN before comparing
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
    # PyArrow's to_pandas() converts to a NumPy array, which gives better
    # type compatibility for the comparison below
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
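# A minimal sketch of why only the first byte of the validity buffers is compared
# above: for a 5-element pattern such as [valid, null, valid, null, valid], both
# Arrow and cudf encode element i in bit i (LSB first) of byte 0, so the leading
# byte matches even though the buffer lengths differ (cudf pads to 64 bytes).
# Illustrative helper only, not collected by pytest.
def _sketch_validity_first_byte():
    validity = np.array([1, 0, 1, 0, 1], dtype=np.uint8)
    first_byte = np.packbits(validity, bitorder="little")[0]
    assert first_byte == 0b00010101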
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
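# A small sketch of the "missing" part above: a value outside the declared
# categories becomes NaN in a pandas Categorical, which is what round-trips as a
# null through Arrow and cudf. Illustrative helper only, not collected by pytest.
def _sketch_categorical_missing_value():
    cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
    assert pd.isna(cat[2])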
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
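# A small pandas-only sketch of the min_count semantics exercised above: when the
# number of valid (non-NA) values is below min_count, the reduction returns NA
# instead of a number. Illustrative helper only, not collected by pytest.
def _sketch_min_count_semantics():
    ser = pd.Series([1.0, np.nan])
    assert ser.sum(min_count=1) == 1.0
    assert np.isnan(ser.sum(min_count=2))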
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
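# A small pandas-only sketch of the shape difference test_quantile exercises:
# a scalar q reduces to a Series, while a list q (even of length one) keeps a
# DataFrame with q as the index. Illustrative helper only, not collected by pytest.
def _sketch_quantile_return_types():
    pdf = pd.DataFrame({"val": [1.0, 2.0, 3.0, 4.0]})
    assert isinstance(pdf.quantile(0.5), pd.Series)
    assert isinstance(pdf.quantile([0.5]), pd.DataFrame)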
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string can differ between runs, which sometimes makes
    # enc_with_name_arr and enc_arr identical. There is no better way to force a
    # stable hash, so use an integer name to get a constant value back from hash().
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
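# A minimal sketch of the hashing caveat noted above: CPython hashes small
# positive ints to themselves, so an integer name is stable across runs, while
# str hashing is randomized per process unless PYTHONHASHSEED is fixed.
# Illustrative helper only, not collected by pytest.
def _sketch_hash_stability():
    assert hash(1) == 1           # deterministic across runs
    _ = hash("series_name")       # varies between interpreter runs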
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguious():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
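# A numpy-only sketch of the non-contiguity the test above relies on: slicing a
# column out of a C-ordered 2D array yields a strided view, so the per-column
# .values that pandas hands back are not C-contiguous either.
# Illustrative helper only, not collected by pytest.
def _sketch_column_slice_not_contiguous():
    arr = np.arange(12, dtype=np.float64).reshape(3, 4)  # C-ordered block
    col = arr[:, 0]
    assert arr.flags["C_CONTIGUOUS"]
    assert not col.flags["C_CONTIGUOUS"]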
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
# Typecast Pandas because None will return `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
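# A small pandas-only sketch of the dtype change the TODO above refers to:
# reindexing with labels that are not present introduces NaN, which forces an
# int64 column up to float64. Illustrative helper only, not collected by pytest.
def _sketch_reindex_int_to_float():
    ser = pd.Series([1, 2, 3])          # int64
    reindexed = ser.reindex([0, 1, 5])  # label 5 is new -> NaN
    assert reindexed.dtype == np.float64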
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
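# A falsy, non-string name (False) should still be used verbatim as the column label.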
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
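# sort_index with inplace=True returns None, so compare the mutated frames directly.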
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
# ignore_index is only supported by pandas' sort_index in v1.0+, so it is
# emulated below with reset_index(drop=True) instead of being passed through
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
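# pyarrow's from_pandas=True converts NaN to null, mirroring cudf's nan_as_null=True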
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
for c in gdf.columns:
assert np.array_equal(gdf[c].nullmask.to_array(), result[c].to_array())
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
index = list(i for i in range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
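# as_column should infer float64 for empty input and honor any explicitly requested dtype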
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
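# nan_as_null=False keeps NaN as a value rather than a null, matching pandas' isin semantics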
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
if str(e) == "Lengths must match.":
pytest.xfail(
"https://github.com/pandas-dev/pandas/issues/34256"
)
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
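# Choose sample data that fits the source dtype: unsigned types cannot hold
# negatives and only floats can hold NaN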
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
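# var and std accept ddof; pass ddof=0 to both libraries so the comparison is like-for-like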
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
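# nullable=True round-trips cudf nulls into pandas' nullable (masked) dtypes
# so <NA> values survive the comparison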
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
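# cudf may return the result columns in a different order, so select pandas'
# column order before comparing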
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
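    # Rough accounting behind the numbers above (an assumption, not from cudf docs):
    # the two source index columns (int64 B and float64 C) take 8 bytes per row each
    # (rows * 16), the two code columns presumably another 8 bytes per row each,
    # and each level stores its 3 unique 8-byte values (3 * 8 per level).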
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas uses NaN and typecasts to float64 if there are missing values on
    # alignment, so we need to typecast to float64 for the equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect =
|
pd.DataFrame({"a": pd_series1, "b": pd_series2})
|
pandas.DataFrame
|
import argparse
import collections
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import scipy.cluster
import scipy.spatial.distance
import sklearn.cluster
import sklearn.feature_extraction
import sklearn.manifold
import sklearn.metrics.pairwise
if True:
p = argparse.ArgumentParser()
p.add_argument("--tag", required=True)
args = p.parse_args()
tag = args.tag
else:
tag = "Hemoglobin_CTEP Trials_072018"
#tag = "Platelets_CTEP Trials_072018"
#tag = "WBC_CTEP Trials_072018"
#tag = "HIV_CTEPTrials_072018"
input_tsv = "../nci_data/dataset1-trials/" + tag + ".tsv"
output_pdf = "./" + tag + ".clustering.pdf"
features_csv = "./" + tag + ".features.csv"
linkage_matrix_csv = "./" + tag + ".linkage_matrix.csv"
# Load data.
tb = pd.read_table(input_tsv)
num_rows_excluded = sum(pd.isnull(tb["Boolean"]))
num_rows_orig = tb.shape[0]
tb = tb.loc[~pd.isnull(tb["Boolean"]),:]
tb = tb.reset_index(drop=True)
num_rows = tb.shape[0]
print("Excluding %d of %d rows" % (num_rows_excluded, num_rows_orig))
print("After exclusion, %d rows remain" % num_rows)
# Parse boolean.
def f(b):
b = re.sub(r"[()]", "", b)
operators = [w for w in b.split() if w in ("OR", "AND")]
as_ops = b.replace("OR", "OP").replace("AND", "OP")
triples = [tuple(re.split(r'(>=|<=|>|<|==|=)', t.strip(), maxsplit=1)) for t in as_ops.split("OP")]
triples = [tuple(ti.strip() for ti in t) for t in triples]
for i, t in enumerate(triples):
print(t)
if len(t) == 2:
new_triple = (t[0], t[1], "?")
print("Warning: {} is not of length 3, replacing with {}".format(t, new_triple))
triples[i] = new_triple
if len(t) == 1:
new_triple = (t[0], "?", "?")
print("Warning: {} is not of length 3, replacing with {}".format(t, new_triple))
triples[i] = new_triple
return {"triples": triples, "operators": operators}
def g(b):
if pd.isnull(b):
return b
else:
return f(b)
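# Illustrative sketch (not part of the original script): for a Boolean such as
# "(WBC >= 3 AND Platelets > 100)", f() should return
# {'triples': [('WBC', '>=', '3'), ('Platelets', '>', '100')], 'operators': ['AND']}.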
tb["parsed"] = [g(b) for b in tb["Boolean"]]
triples = [x["triples"] for x in tb["parsed"] if x]
operators = [x["operators"] for x in tb["parsed"]]
# Make features.
feat = [collections.defaultdict(float) for i in range(tb.shape[0])]
for i, triple_list in enumerate(triples):
for l, c, r in triple_list:
# Add count of each element alone within each triple.
feat[i]["l_count_%s" % l] += 1
feat[i]["c_count_%s" % c] += 1
feat[i]["r_count_%s" % r] += 1
# Add count of each pair of elements within each triple.
feat[i]["lc_count_(%s, %s)" % (l, c)] += 1
feat[i]["lr_count_(%s, %s)" % (l, r)] += 1
feat[i]["cr_count_(%s, %s)" % (c, r)] += 1
# Add count of each triple.
t1 = (l, c, r)
feat[i]["triple_count_%s" % str(t1)] += 1
# Add count of each pair of triples.
for t2 in triple_list:
feat[i]["triple_pair_count_%s_%s" % (str(t1), str(t2))] += 1
for i, operator_list in enumerate(operators):
for o1 in operator_list:
# Add count for each operator.
feat[i]["operator_count_%s" % o1] += 1
# Add count for each pair of operators.
for o2 in operator_list:
feat[i]["operator_pair_count_%s_%s" % (o1, o2)] += 1
# Make feature matrix.
feature_vectorizer = sklearn.feature_extraction.DictVectorizer(sparse=False)
X = feature_vectorizer.fit_transform(feat)
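# X is a dense (num_rows x num_features) count matrix; row i corresponds to
# trial i in `tb`, and the column order is given by
# feature_vectorizer.get_feature_names().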
# Carry out hierarchical clustering.
#hc_linkage = scipy.cluster.hierarchy.linkage(X, method="ward", metric="euclidean")
hc_linkage = scipy.cluster.hierarchy.linkage(X, method="complete", metric="cosine")
#hc_linkage = scipy.cluster.hierarchy.linkage(X, method="average", metric="cosine")
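# hc_linkage is the standard SciPy (n-1) x 4 linkage matrix: each row lists the
# two cluster indices that were merged, the complete-linkage cosine distance
# between them, and the size of the newly formed cluster.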
# Plot clustering.
h = 25.0 * tb.shape[0] / 174
fig = plt.figure(figsize=(25, h))
leaf_labels = [x for x in tb["Boolean"]]
dn = scipy.cluster.hierarchy.dendrogram(hc_linkage, labels=leaf_labels, orientation="left")
plt.title("Hierarchical clustering of %s " % tag)
plt.axis('tight')
plt.subplots_adjust(right=0.45)
plt.savefig(output_pdf)
plt.close(fig)
# Save features used for clustering.
feature_colnames = ["feature_%s" % x for x in feature_vectorizer.get_feature_names()]
feature_tb =
|
pd.DataFrame(X, index=tb.index, columns=feature_colnames)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import sys
sys.path.insert(1, '/')
import pdm_viz.EDA.EDA as eda
sys.path.insert(1, '/app')
import plotly.graph_objects as go
import datetime
tagList = ['MV26_PDI-1535A-02.PV_mean', 'MV26_PDI-1535B-02.PV_mean']
df =
|
pd.read_csv('/app/data/preprocessed/temp.csv')
|
pandas.read_csv
|
# Copyright (c) 2018-2019, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import nvstrings
from utils import assert_eq
def test_timestamp2int():
s = nvstrings.to_device(["2019-03-20T12:34:56Z", "2020-02-29T23:59:59Z"])
s1 = pd.Series(["2019-03-20T12:34:56Z", "2020-02-29T23:59:59Z"]).apply(
lambda x: pd.Timestamp(x))
got = s.timestamp2int()
expected = s1.astype(np.int64)
assert np.allclose(got, expected, 10)
s1 =
|
pd.Series(["2019-03-20T12:34:56Z", "2020-02-29T23:59:59Z"])
|
pandas.Series
|
import dash_bootstrap_components as dbc
import dash_html_components as html
import dash
import plotly.express as px
import dash_core_components as dcc
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
import plotly.graph_objects as go
import dash_daq as daq
import ast
from assets.CustomerReviewGraphs import CustomerReviewAnalysis
#from assets.CusomerReviewKeywords import CustomerReviewKeywords
def drawTrendsBarFigure(fig=None):
if(fig is None):
fig=resp['timeseries_bar_fig']
return html.Div([dbc.Card(dbc.CardBody([html.P('Review Trends'),
dcc.Graph(figure=fig.update_layout(
template='plotly_dark',plot_bgcolor= 'rgba(0, 0, 0, 0)',paper_bgcolor= 'rgba(0, 0, 0, 0)',),config={
'displayModeBar': False})])),])
def drawRatings(fig=None):
if(fig is None):
fig=resp['rating_fig']
return html.Div([dbc.Card(dbc.CardBody([html.P('Ratings Visualization'),
dcc.Graph(figure=fig.update_layout(template='plotly_dark',plot_bgcolor= 'rgba(0, 0, 0, 0)',
paper_bgcolor= 'rgba(0, 0, 0, 0)',),
config={'displayModeBar': False
})])), ])
def drawTimeSeriesFigure(fig=None):
if(fig is None):
fig=resp['timeseries_line_fig']
return html.Div([dbc.Card(dbc.CardBody([html.P('Review Sentiments Trend'),dcc.Graph(figure=fig.update_layout(template='plotly_dark',
plot_bgcolor= 'rgba(0, 0, 0, 0)',paper_bgcolor= 'rgba(0, 0, 0, 0)',),
config={'displayModeBar': False})])), ])
def drawPieFigure(fig=None):
if(fig is None):
fig=resp['sentiment_fig']
return html.Div([dbc.Card(dbc.CardBody([html.P('Review Sentiments'),dcc.Graph(
figure=fig.update_layout(template='plotly_dark',
plot_bgcolor= 'rgba(0, 0, 0, 0)',paper_bgcolor= 'rgba(0, 0, 0, 0)',),
config={'displayModeBar': False}) ])),])
def map_sentiment(rating):
if(int(rating)==3):
return 2
elif(int(rating)<3):
return 3
else:
return 1
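# map_sentiment presumably encodes star ratings as sentiment codes:
# rating > 3 -> 1 (positive), rating == 3 -> 2 (neutral), rating < 3 -> 3 (negative).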
def drawTrendingWords(pos_keywords=None,neg_kewords=None):
return html.Div([
dbc.Card(
dbc.CardBody([html.P('Trending Keywords'),dbc.ListGroup([
dbc.ListGroupItem(pos_keywords[0], color="success"),
dbc.ListGroupItem(neg_kewords[0], color="danger"),
dbc.ListGroupItem(pos_keywords[1], color="success"),
dbc.ListGroupItem(neg_kewords[1], color="danger"),
dbc.ListGroupItem(pos_keywords[2], color="success"),
dbc.ListGroupItem(neg_kewords[2], color="danger"),
dbc.ListGroupItem(pos_keywords[3], color="success"),
dbc.ListGroupItem(neg_kewords[3], color="danger"),
dbc.ListGroupItem(neg_kewords[4], color="danger")
])])),], style={'textAlign': 'center'})
def drawRecentReviews(data):
return html.Div([
dbc.Card(
dbc.CardBody([html.P('Recent Reviews'),
dbc.Table.from_dataframe(data, striped=False, bordered=False, hover=True,dark=True,responsive=True)
])),], style={'textAlign': 'center',"maxHeight": "500px", "overflow": "scroll"})
review_business_data_merged =
|
pd.read_csv('data/yelp_reviews_business_merged.csv')
|
pandas.read_csv
|
from datetime import date
from datetime import datetime
from time import strptime
import logging
import numpy as np #type: ignore
import pandas as pd #type: ignore
from pandas.api.types import is_numeric_dtype
from pandas.api.types import is_timedelta64_dtype
import re
from typing import (
Any,
Callable,
Dict,
Hashable,
Iterable,
List,
NamedTuple,
Optional,
Pattern,
Set,
Tuple,
Union,
)
logger = logging.getLogger(__name__)
# add_xl_formula() {{{1
def add_xl_formula(df: pd.DataFrame,
column_name: str = 'xl_calc',
formula: str = '=CONCATENATE(A{row}, B{row}, C{row})',
offset: int = 2) -> pd.DataFrame:
'''add Excel (xl) formula column
Parameters
----------
df
pandas dataframe
column_name
the column name to be associated with the column formula values, default
'xl_calc'
formula
Excel formula to be applied. As an example:
.. code-block::
'=CONCATENATE(A{row}, B{row}, C{row})'
where {row} is the defined replacement variable which will be replaced
with actual individual row value.
offset
starting row value, default = 2 (resultant xl sheet includes headers)
Examples
--------
.. code-block::
formula = '=CONCATENATE(A{row}, B{row}, C{row})'
add_xl_formula(df, column_name='X7', formula=formula)
Returns
-------
pandas dataframe
'''
col_values = []
for x in range(offset, df.shape[0] + offset):
repl_str = re.sub('{ROW}', str(x), string=formula, flags=re.I)
col_values.append(repl_str)
df[column_name] = col_values
return df
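# Illustrative usage sketch (added for clarity; the frame below is hypothetical):
# >>> demo = pd.DataFrame({'A': ['x', 'y'], 'B': [1, 2], 'C': [3, 4]})
# >>> add_xl_formula(demo)['xl_calc'].tolist()
# ['=CONCATENATE(A2, B2, C2)', '=CONCATENATE(A3, B3, C3)']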
# duration {{{1
def duration(s1: pd.Series,
s2: pd.Series = None,
unit: Union[str, None] = None,
round: Union[bool, int] = 2,
freq: str = 'd') -> pd.Series:
''' calculate duration between two columns (series)
Parameters
----------
s1
'from' datetime series
s2
'to' datetime series.
Default None. If None, defaults to today.
unit
default None - returns timedelta in days
'd' - days as an integer,
'years' (based on 365.25 days per year),
'months' (based on 30 day month)
Other possible options are:
- ‘W’, ‘D’, ‘T’, ‘S’, ‘L’, ‘U’, or ‘N’
- ‘days’ or ‘day’
- ‘hours’, ‘hour’, ‘hr’, or ‘h’
- ‘minutes’, ‘minute’, ‘min’, or ‘m’
- ‘seconds’, ‘second’, or ‘sec’
- ‘milliseconds’, ‘millisecond’, ‘millis’, or ‘milli’
- ‘microseconds’, ‘microsecond’, ‘micros’, or ‘micro’-
- ‘nanoseconds’, ‘nanosecond’, ‘nanos’, ‘nano’, or ‘ns’.
check out pandas
`timedelta object <https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.html>`_
for details.
round
Default 2. If the duration result is a plain number and this
parameter contains a positive integer, the result is rounded to this
decimal precision.
freq
Default is 'd'(days). If the duration result is a pd.Timedelta dtype,
the value can be 'rounded' using this frequency parameter.
Must be a fixed frequency like 'S' (second) not 'ME' (month end).
For a list of valid values, check out
`pandas offset aliases <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_
Returns
-------
series
if unit is None - series is of data type timedelta64[ns]
otherwise series of type int.
Examples
--------
.. code-block::
%%piper
sample_data()
>> select(['-countries', '-regions', '-ids', '-values_1', '-values_2'])
>> assign(new_date_col=pd.to_datetime('2018-01-01'))
>> assign(duration = lambda x: duration(x.new_date_col, x.order_dates, unit='months'))
>> assign(duration_dates_age = lambda x: duration(x['dates']))
>> head(tablefmt='plain')
dates order_dates new_date_col duration duration_dates_age
0 2020-01-01 2020-01-07 2018-01-01 25 452 days
1 2020-01-02 2020-01-08 2018-01-01 25 451 days
2 2020-01-03 2020-01-09 2018-01-01 25 450 days
3 2020-01-04 2020-01-10 2018-01-01 25 449 days
'''
if s2 is None:
s2 = datetime.today()
if unit is None:
result = s2 - s1
elif unit == 'years':
result = ((s2 - s1) / pd.Timedelta(365.25, 'd'))
elif unit == 'months':
result = ((s2 - s1) /
|
pd.Timedelta(30, 'd')
|
pandas.Timedelta
|
from model import Transformer_
from dataloader import Vocab_tokenizer, get_loader
from bleu import calc_BLEU
from sklearn.utils import shuffle
import pandas as pd
import os
import numpy as np
import torch
import torch.nn as nn
from Scheduler import CosineAnnealingWarmUpRestarts
import torch.nn.functional as F
import math, copy, time
import itertools
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(CurrEpoch, Model, iterator, optimizer, scheduler, metric, metric_translation, clip,
lam_translation, lam_gloss):
Model.train()
epoch_loss = 0
for i, (features, glosses, translations) in enumerate(iterator):
src, orth, trg = \
features.to(device), glosses.to(device), translations.to(device)
#print(glosses.shape, translations.shape)
optimizer.zero_grad() # Initialize gradient
predict_translation, predict_gloss = Model(src, trg[:, :-1])
#print(predict_translation.shape, trg.shape)
translation_dim = predict_translation.shape[-1]
gloss_dim = predict_gloss.shape[-1]
predict_translation = predict_translation.contiguous().view(-1, translation_dim)
predict_gloss = predict_gloss.contiguous().view(-1, gloss_dim)
# GTs
# print(orth)
orth = orth.contiguous().view(-1)
orth = orth.type(torch.LongTensor).to(device)
trg = trg[:, 1:].contiguous().view(-1)
trg = trg.type(torch.LongTensor).to(device)
loss_translation = metric(predict_translation, trg)
loss_gloss = metric(predict_gloss, orth)
loss = (lam_translation * loss_translation + lam_gloss * loss_gloss) / (lam_gloss + lam_translation)
loss.backward()
# And gradient clipping :
torch.nn.utils.clip_grad_norm_(Model.parameters(), clip)
# Update params :
optimizer.step()
scheduler.step()
# total loss in epoch
epoch_loss += loss.item()
#print("+"*50)
return epoch_loss / len(iterator)
def eval(CurrEpoch, Model, iterator, metric, data_tokenizer, lam_translation, lam_gloss): # No gradient updates, no optimizer step or clipping
Model.eval()
epoch_loss = 0
with torch.no_grad():
test_sentence = []
GT_sentence = []
for i, (features, glosses, translations) in enumerate(iterator):
src, orth, trg = \
features.to(device), glosses.to(device), translations.to(device)
predict_translation, predict_gloss = Model(src, trg[:, :-1])
# Generate text file
for tokens in predict_translation:
# Get argmax of tokens, bring it back to CPU.
tokens = torch.argmax(tokens, dim=1).to(dtype=torch.long, device=torch.device("cpu"))
tokens = tokens.numpy()
# make string, append it to test_sentence
itos = data_tokenizer.stringnize(tokens)
pred_string = ' '.join(itos)
test_sentence.append(pred_string)
for tokens in trg:
tokens = tokens.to(dtype=torch.long, device=torch.device("cpu"))
tokens = tokens.numpy()
# make string, append it to GT_sentence
itos = data_tokenizer.stringnize(tokens[1:])
GT_string = ' '.join(itos)
GT_sentence.append(GT_string)
translation_dim = predict_translation.shape[-1]
gloss_dim = predict_gloss.shape[-1]
# Re-allocate memory :
# output: [BS, trg_len - 1, output_dim]
# trg: [BS, trg_len-1], exclude <SOS>
# Predictions
predict_translation = predict_translation.contiguous().view(-1, translation_dim)
predict_gloss = predict_gloss.contiguous().view(-1, gloss_dim)
# GTs
orth = orth.contiguous().view(-1)
orth = orth.type(torch.LongTensor).to(device)
trg = trg[:, 1:].contiguous().view(-1)
trg = trg.type(torch.LongTensor).to(device)
loss_translation = metric(predict_translation, trg)
loss_gloss = metric(predict_gloss, orth)
loss = (lam_translation * loss_translation + lam_gloss * loss_gloss) / (lam_gloss + lam_translation)
epoch_loss += loss.item()
#print(test_sentence, '\n', GT_sentence)
BLEU4 = calc_BLEU(test_sentence, GT_sentence)
return epoch_loss / len(iterator), BLEU4
'''
# Predictions
len_trans_pred = torch.Tensor([len(trans) for trans in predict_translation]).type(torch.LongTensor)
log_prob_trans = predict_translation.contiguous().log_softmax(2)
len_orth_pred = torch.Tensor([len(orth_) for orth_ in predict_gloss]).type(torch.LongTensor)
log_prob_orth = predict_gloss.contiguous().log_softmax(2)
# GTs
orth_opt = orth.contiguous().type(torch.LongTensor).to(device)
trg_opt = trg[:, 1:].contiguous().type(torch.LongTensor).to(device)
len_orth_ipt = torch.Tensor([(sum(t > 0 for t in gloss)) for gloss in orth_opt]).type(torch.LongTensor)
len_trans_ipt = torch.Tensor([(sum(t > 0 for t in trans))-1 for trans in trg_opt]).type(torch.LongTensor)
# Loss
loss_translation = metric(log_prob_trans.permute(1, 0, 2), trg_opt, len_trans_pred, len_trans_ipt)
loss_gloss = metric(log_prob_orth.permute(1, 0, 2), orth_opt, len_orth_pred, len_orth_ipt)'''
def translate(Model, iterator, metric, data_tokenizer, max_len = 55):
Model.eval()
with torch.no_grad():
test_sentence = []
GT_sentence = []
for i, (features, glosses, translations) in enumerate(iterator):
src, orth, trg = \
features.to(device), glosses.to(device), translations.to(device)
src_mask = Model.make_source_mask(src)
enc_feature, predict_gloss = Model.Encoder(src, src_mask)
trg_index = [[data_tokenizer.stoi["<SOS>"]] for i in range(src.size(0))]
#print(trg_index)
for j in range(max_len):
#print(torch.LongTensor(trg_index).shape)
trg_tensor = torch.LongTensor(trg_index).to(device)
trg_mask = Model.make_target_mask(trg_tensor)
output = Model.Decoder(trg_tensor, enc_feature, src_mask, trg_mask)
output = nn.Softmax(dim=-1)(output)
pred_token = torch.argmax(output, dim=-1)[:,-1]
#print(torch.argmax(output, dim=-1))
for target_list, pred in zip(trg_index, pred_token.tolist()):
target_list.append(pred)
# Generate text file
for tokens in trg_index:
# Get argmax of tokens, bring it back to CPU.
#print(tokens)
itos = data_tokenizer.stringnize(tokens)
pred_string = ' '.join(itos)
test_sentence.append(pred_string)
for tokens in trg:
tokens = tokens.to(dtype=torch.long, device=torch.device("cpu"))
tokens = tokens.numpy()
# make string, append it to GT_sentence
itos = data_tokenizer.stringnize(tokens[1:])
GT_string = ' '.join(itos)
GT_sentence.append(GT_string)
#print(torch.Tensor(trg_index).shape)
#print(test_sentence, '\n', GT_sentence)
BLEU4 = calc_BLEU(test_sentence, GT_sentence)
return BLEU4
# Count parameters and initialize weights
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def initialize_weights(m):
if hasattr(m, 'weight') and m.weight.dim() > 1:
nn.init.xavier_uniform_(m.weight.data)
def epoch_time(start, end):
elapsed_time = end - start
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
def main():
################################## Prepare datasets and dataloader ##################################
data_path = 'C:/Users/PC/2021-MLVU/SLT_project/'
train_data =
|
pd.read_csv(data_path + "PHOENIX-2014-T.train.corpus.csv", delimiter='|')
|
pandas.read_csv
|
#pylint: disable=line-too-long, too-many-public-methods, invalid-name
#pylint: disable=maybe-no-member, too-few-public-methods, no-member
from __future__ import absolute_import
from argparse import Namespace
from collections import OrderedDict
import filecmp
from io import StringIO
import os
import unittest
import numpy
import pandas as pd
from testfixtures import TempDirectory
import generollup.rollup as rollup
#TODO: cgates: How about we fix the chained assigments and get rid of this warning suppression.
pd.set_option('mode.chained_assignment', None)
def dataframe(input_data, sep="|", dtype=None):
return pd.read_csv(StringIO(input_data), sep=sep, header=0, dtype=dtype)
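# Illustrative sketch of the helper above (hypothetical input):
# >>> dataframe("a|b\n1|2")
#    a  b
# 0  1  2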
class MockFormatRule(object):
def __init__(self, format_df):
self.format_df = format_df
self.last_data_df = None
self.last_format_df = None
def format(self, data_df):
self.last_data_df = data_df
return self.format_df
def style(self, format_df):
self.last_format_df = format_df
return self.format_df
class GeneRollupTestCase(unittest.TestCase):
def setUp(self):
rollup._DBNSFP_COLUMN = 'dbNSFP_rollup_damaging'
rollup._EFFECT_COLUMN = 'SNPEFF_TOP_EFFECT_IMPACT'
rollup._GENE_SYMBOL = 'GENE_SYMBOL'
def test_create_df(self):
input_string =\
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tSNPEFF_TOP_EFFECT_IMPACT\tJQ_SUMMARY_SOM_COUNT|sampleA
1\t2\t3\t4\t5\t6\t7\t8'''
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)',
gene_column_name='GENE_SYMBOL',
tsv=False)
actual_df = rollup._create_df(StringIO(input_string), args)
self.assertEquals(["GENE_SYMBOL", "dbNSFP_rollup_damaging", "SNPEFF_TOP_EFFECT_IMPACT", "JQ_SUMMARY_SOM_COUNT|sampleA"],
list(actual_df.columns.values))
def test_create_df_missingDbnsfpAndSnpeffOkay(self):
input_string =\
'''GENE_SYMBOL\theaderA\theaderB\tJQ_SUMMARY_SOM_COUNT|sampleA
foo\t1\t2\t0'''
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)',
gene_column_name='GENE_SYMBOL',
tsv=False)
actual_df = rollup._create_df(StringIO(input_string), args)
self.assertEquals(["GENE_SYMBOL", "headerA", "headerB", "JQ_SUMMARY_SOM_COUNT|sampleA"],
list(actual_df.columns.values))
def test_create_df_missingDbNsfpOkay(self):
input_string =\
'''GENE_SYMBOL\tSNPEFF_TOP_EFFECT_IMPACT\tJQ_SUMMARY_SOM_COUNT|sampleA|TUMOR
1\t2\t3'''
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
gene_column_name='GENE_SYMBOL',
effect_column_name='SNPEFF_TOP_EFFECT_IMPACT',
tsv=False)
actual_df = rollup._create_df(StringIO(input_string), args)
self.assertEquals(["GENE_SYMBOL", "SNPEFF_TOP_EFFECT_IMPACT", "JQ_SUMMARY_SOM_COUNT|sampleA|TUMOR"],
list(actual_df.columns.values))
def test_create_df_missingEffectOkay(self):
input_string =\
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tJQ_SUMMARY_SOM_COUNT|sampleA|TUMOR
1\t2\t3'''
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
gene_column_name='GENE_SYMBOL',
dbnsfp_column_name='dbNSFP_rollup_damaging',
tsv=False)
actual_df = rollup._create_df(StringIO(input_string), args)
self.assertEquals(["GENE_SYMBOL", "dbNSFP_rollup_damaging", "JQ_SUMMARY_SOM_COUNT|sampleA|TUMOR"],
list(actual_df.columns.values))
def test_create_df_missingSamples(self):
input_string =\
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tSNPEFF_TOP_EFFECT_IMPACT
1\t2\t3
1\t2\t3'''
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(P.)\|TUMOR',
gene_column_name='GENE_SYMBOL',
dbnsfp_column_name='dbNSFP_rollup_damaging',
effect_column_name='SNPEFF_TOP_EFFECT_IMPACT',
tsv=False)
self.assertRaisesRegexp(rollup.UsageError,
"Cannot determine sample genotype columns with supplied regex.*",
rollup._create_df,
StringIO(input_string),
args)
#TODO: (jebene) I can't figure out how to initialize this as having null values
def xtest_create_df_removesIntergenicVariants(self):
input_string =\
'''GENE_SYMBOL\tSNPEFF_TOP_EFFECT_IMPACT\tJQ_SUMMARY_SOM_COUNT
BRCA1\t2\t3
0\t4\t5
6\t7\t8'''
input_string = input_string.replace("0", numpy.nan)
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
gene_column_name="GENE_SYMBOL",
tsv=False)
actual_df = rollup._create_df(StringIO(input_string), args)
self.assertEquals(["GENE_SYMBOL", "SNPEFF_TOP_EFFECT_IMPACT", "JQ_SUMMARY_SOM_COUNT"],
list(actual_df.columns.values))
self.assertEquals(["BRCA1", "6"], list(actual_df["GENE_SYMBOL"].values))
def test_sort_by_dbnsfp_rank(self):
input_string =\
'''gene_symbol\tJQ_SUMMARY_SOM_COUNT|P1|NORMAL\teffect_annotation|overall_effect_rank\tdbNSFP_annotation|overall_damaging_rank
BRCA1\th\t7\t2
EGFR\tm\t4\t3
SON\tm\t5\t1
BRCA2\tm\t5\t1
CREBBP\thhh\t6\t1'''
input_df = dataframe(input_string, sep="\t")
sorted_df = rollup._sort_by_dbnsfp_rank(input_df)
self.assertEquals(["BRCA2", "SON", "CREBBP", "BRCA1", "EGFR"], list(sorted_df["gene_symbol"].values))
self.assertEquals([1, 1, 1, 2, 3], list(sorted_df["dbNSFP_annotation|overall_damaging_rank"].values))
def test_combine_dfs(self):
summary_string =\
'''GENE_SYMBOL\tJQ_SUMMARY_SOM_COUNT|P1|NORMAL
BRCA1\t1
EGFR\t1
CREBBP\t1'''
summary_df = dataframe(summary_string, sep="\t")
summary_df = summary_df.set_index(["GENE_SYMBOL"])
dbNSFP_string =\
'''GENE_SYMBOL\tdbNSFP|P1|NORMAL
BRCA1\t2
CREBBP\t4'''
dbNSFP_df = dataframe(dbNSFP_string, sep="\t")
dbNSFP_df = dbNSFP_df.set_index(["GENE_SYMBOL"])
snpEff_string =\
'''GENE_SYMBOL\tsnpEff|P1|NORMAL
BRCA1\th
CREBBP\thh'''
snpEff_df = dataframe(snpEff_string, sep="\t")
snpEff_df = snpEff_df.set_index(["GENE_SYMBOL"])
dfs = OrderedDict()
dfs["summary"] = summary_df
dfs["dbNSFP"] = dbNSFP_df
dfs["snpEff"] = snpEff_df
actual = rollup._combine_dfs(dfs)
self.assertEquals(["BRCA1", "CREBBP"], list(actual.index.values))
def xtest_translate_to_excel(self):
with TempDirectory() as output_dir:
output_dir.write("output.xlsx", "")
output_file = os.path.join(output_dir.path, "output.xlsx")
data_string =\
'''gene symbol|PATIENT_A_SnpEff|PATIENT_A_dbNSFP|SnpEff_overall_impact_rank
MOD|mml|12|2
NULL1|||
HIGH|hhmlx|4|1'''
data_df = dataframe(data_string)
data_df.fillna("", inplace=True)
style_string = \
'''gene symbol|PATIENT_A_SnpEff|PATIENT_A_dbNSFP|SnpEff_overall_impact_rank
MOD|||
HIGH|||
NULL1|||'''
style_df = dataframe(style_string)
style_df["PATIENT_A_SnpEff"] = [{"font_size": "4", "bg_color": "#6699FF", "font_color": "#6699FF"},
"",
{"font_size": "4", "bg_color": "#003366", "font_color": "#003366"}]
style_df["PATIENT_A_dbNSFP"] = [{"font_size": "12", "bg_color": "#ffa500", "font_color": "#000000"},
"",
{"font_size": "12", "bg_color": "white", "font_color": "#003366"}]
style_df["SnpEff_overall_impact_rank"] = [{"font_size": "12", "bg_color": "white", "font_color": "#000000"},
"",
{"font_size": "12", "bg_color": "red", "font_color": "#000000"}]
style_df.fillna("", inplace=True)
writer = pd.ExcelWriter(output_file, engine="xlsxwriter")
rollup._translate_to_excel(data_df, style_df, writer)
script_dir = os.path.dirname(os.path.realpath(__file__))
expected_output = os.path.join(script_dir,
"functional_tests",
"translate_to_excel",
"expected_output.xlsx")
self.assertEquals(True, filecmp.cmp(expected_output, output_file))
def test_reset_style_gene_values(self):
data_string =\
'''gene_symbol|PATIENT_A_SnpEff
BRCA1|{"foo": "bar"}
TANK|{"foo": "bar"}
CREBBP|{"foo": "bar"}'''
data_df = dataframe(data_string)
actual = rollup._reset_style_gene_values(data_df)
actual = actual.applymap(str)
expected_string =\
'''gene_symbol|PATIENT_A_SnpEff
{}|{"foo": "bar"}
{}|{"foo": "bar"}
{}|{"foo": "bar"}'''
expected = dataframe(expected_string)
self.assertEquals(list(expected["gene_symbol"].values),
list(actual["gene_symbol"].values))
self.assertEquals(list(expected["PATIENT_A_SnpEff"].values),
list(actual["PATIENT_A_SnpEff"].values))
class dbNSFPTestCase(unittest.TestCase):
def setUp(self):
rollup._SAMPLENAME_REGEX = "JQ_SUMMARY_SOM_COUNT.*"
rollup._GENE_SYMBOL = "GENE_SYMBOL"
rollup._XLSX = False
def tearDown(self):
pass
def test_remove_unnecessary_columns(self):
FORMAT_DF = pd.DataFrame([[42] * 4] * 1)
formatRule = MockFormatRule(FORMAT_DF)
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
gene_column_name="GENE_SYMBOL",
tsv=False)
dbNSFP = rollup.dbNSFP([formatRule], args)
input_string = \
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tBAZ\tJQ_SUMMARY_SOM_COUNT|P1|TUMOR\tJQ_SUMMARY_SOM_COUNT|P2|TUMOR\tFOO\tBAR'''
input_df = dataframe(input_string, sep="\t")
actual = dbNSFP._remove_unnecessary_columns(input_df)
self.assertEquals(4, len(actual.columns))
self.assertNotIn("BAZ", actual.columns)
self.assertNotIn("FOO", actual.columns)
self.assertNotIn("BAR", actual.columns)
def test_remove_invalid_rows(self):
FORMAT_DF = pd.DataFrame([[42] * 4] * 2)
formatRule = MockFormatRule(FORMAT_DF)
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
)
dbNSFP = rollup.dbNSFP([formatRule], args)
input_string =\
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tJQ_SUMMARY_SOM_COUNT|P1|TUMOR\tJQ_SUMMARY_SOM_COUNT|P2|TUMOR
BRCA1\t2\t1\t1
BRCA1\t0\t.\t1
BRCA1\t\t1\t1
BRCA1\t.\t1\t1
CREBBP\t3\t0\t.'''
input_df = dataframe(input_string, sep="\t")
actual = dbNSFP._remove_invalid_rows(input_df)
self.assertEquals(["2", "3"], list(actual["dbNSFP_rollup_damaging"].values))
self.assertEquals(["1", "0"], list(actual["JQ_SUMMARY_SOM_COUNT|P1|TUMOR"].values))
self.assertEquals(["1", "."], list(actual["JQ_SUMMARY_SOM_COUNT|P2|TUMOR"].values))
def test_summarize_dataMatrix(self):
FORMAT_DF = pd.DataFrame([[42] * 4] * 2)
formatRule = MockFormatRule(FORMAT_DF)
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
gene_column_name=rollup._GENE_SYMBOL,
input_gene_column_name=rollup._GENE_SYMBOL,
tsv=False)
dbNSFP = rollup.dbNSFP([formatRule], args)
input_string =\
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tJQ_SUMMARY_SOM_COUNT|P1|TUMOR\tJQ_SUMMARY_SOM_COUNT|P2|TUMOR
BRCA1\t2\t1\t1
BRCA1\t5\t.\t1
CREBBP\t3\t0\t.'''
input_df = dataframe(input_string, sep="\t")
input_df = input_df.applymap(str)
(data_df, style_dfs) = dbNSFP.summarize(input_df)
self.assertEquals(1, len(style_dfs))
self.assertEquals(data_df.shape, style_dfs[0].shape)
data_df = data_df.applymap(str)
expected_string =\
'''GENE_SYMBOL\tdbNSFP_annotation|overall_damaging_rank\tdbNSFP_annotation|damaging_total\tdbNSFP_annotation|damaging_votes|P1\tdbNSFP_annotation|damaging_votes|P2
BRCA1\t1\t9\t2\t7
CREBBP\t2\t0\t\t'''
expected_df = dataframe(expected_string, sep="\t", dtype=str)
expected_df = expected_df.set_index(["GENE_SYMBOL"])
expected_df.fillna("", inplace=True)
expected_df = expected_df.applymap(str)
self.assertEquals('\t'.join(expected_df.columns.values), '\t'.join(data_df.columns.values))
self.assertEquals([list(i) for i in expected_df.values], [list(i) for i in data_df.values])
def test_summarize_dataMatrixIgnoresNullOrZeroDamagingCounts(self):
FORMAT_DF = pd.DataFrame([[42] * 4] * 1)
formatRule = MockFormatRule(FORMAT_DF)
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
gene_column_name=rollup._GENE_SYMBOL,
input_gene_column_name=rollup._GENE_SYMBOL,
tsv=False)
dbNSFP = rollup.dbNSFP([formatRule], args)
input_string =\
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tJQ_SUMMARY_SOM_COUNT|P1|TUMOR\tJQ_SUMMARY_SOM_COUNT|P2|TUMOR
BRCA1\t0\t1\t1
BRCA1\t1\t.\t1
CREBBP\t.\t0\t.'''
input_df = dataframe(input_string, sep="\t")
input_df = input_df.applymap(str)
(data_df, style_dfs) = dbNSFP.summarize(input_df)
self.assertEquals(1, len(style_dfs))
self.assertEquals(data_df.shape, style_dfs[0].shape)
data_df = data_df.applymap(str)
expected_string =\
'''GENE_SYMBOL\tdbNSFP_annotation|overall_damaging_rank\tdbNSFP_annotation|damaging_total\tdbNSFP_annotation|damaging_votes|P1\tdbNSFP_annotation|damaging_votes|P2
BRCA1\t1\t1\t\t1'''
expected_df = dataframe(expected_string, sep="\t", dtype=str)
expected_df = expected_df.set_index(["GENE_SYMBOL"])
expected_df.fillna("", inplace=True)
expected_df = expected_df.applymap(str)
self.assertEquals('\t'.join(expected_df.columns.values), '\t'.join(data_df.columns.values))
self.assertEquals([list(i) for i in expected_df.values], [list(i) for i in data_df.values])
def test_summarize_formatMatrix(self):
FORMAT_DF = pd.DataFrame([[42] * 4] * 2)
formatRule = MockFormatRule(FORMAT_DF)
args = Namespace(input_file="",
output_file="",
sample_genotype_column_regex='JQ_SUMMARY_SOM_COUNT\|(.*)\|TUMOR',
gene_column_name=rollup._GENE_SYMBOL,
input_gene_column_name=rollup._GENE_SYMBOL,
tsv=False)
dbNSFP = rollup.dbNSFP([formatRule], args)
input_string =\
'''GENE_SYMBOL\tdbNSFP_rollup_damaging\tJQ_SUMMARY_SOM_COUNT|P1|TUMOR\tJQ_SUMMARY_SOM_COUNT|P2|TUMOR
BRCA1\t2\t1\t1
BRCA1\t5\t.\t1
CREBBP\t3\t0\t.'''
input_df = dataframe(input_string, sep="\t")
input_df = input_df.applymap(str)
(dummy, format_dfs) = dbNSFP.summarize(input_df)
self.assertEquals(1, len(format_dfs))
self.assertIs(FORMAT_DF, format_dfs[0])
def test_summarize_multipleFormatRules(self):
FORMAT_DF1 =
|
pd.DataFrame([[42] * 4] * 2)
|
pandas.DataFrame
|
"""
Test the preprocessing module
"""
import os
from pathlib import Path
import yaml
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import box
import pytest
from gisutils import df2shp, shp2df
from sfrmaker.checks import check_monotonicity
from sfrmaker.preprocessing import (get_flowline_routing,
cull_flowlines,
preprocess_nhdplus,
clip_flowlines_to_polygon,
edit_flowlines,
swb_runoff_to_csv
)
@pytest.fixture(scope='module')
def test_data_path(project_root_path):
return os.path.join(project_root_path, 'sfrmaker/test/data/shellmound/')
@pytest.fixture(scope='module')
def active_area(outfolder):
active_area_tuple = -90.55, 33.5, -90.16, 33.86
active_area_poly = box(*active_area_tuple)
df = pd.DataFrame({'geometry': [active_area_poly], 'id': [0]})
active_area = os.path.join(outfolder, 'active_area.shp')
df2shp(df, active_area, crs=4269)
return active_area_tuple
@pytest.fixture(scope='module')
def outfolder(outdir):
outfolder = os.path.join(outdir, 'preprocessing')
if not os.path.exists(outfolder):
os.makedirs(outfolder)
return outfolder
@pytest.fixture(autouse=True)
def use_outfolder(outdir, outfolder):
wd = os.getcwd()
os.chdir(outfolder)
yield outfolder
os.chdir(wd)
# test with tuple bounding box and with shapefile
@pytest.fixture(scope='module')
def clipped_flowlines(test_data_path, outfolder, active_area):
#nhdpaths = ['/Users/aleaf/Documents/NHDPlus/NHDPlusMS/NHDPlus08']
nhdpaths = [os.path.join(test_data_path, 'NHDPlus08')]
active_area = os.path.join(outfolder, 'active_area.shp')
results = cull_flowlines(nhdpaths,
asum_thresh=None, intermittent_streams_asum_thresh=None,
cull_invalid=False, cull_isolated=False,
active_area=active_area,
outfolder=outfolder)
return results
@pytest.fixture(scope='module')
def culled_flowlines(test_data_path, outfolder, active_area):
nhdpaths = [os.path.join(test_data_path, 'NHDPlus08')]
results = cull_flowlines(nhdpaths,
asum_thresh=20, intermittent_streams_asum_thresh=50,
cull_invalid=True, cull_isolated=True,
active_area=active_area,
outfolder=outfolder)
return results
@pytest.fixture(scope='module')
def preprocessed_flowlines(test_data_path, culled_flowlines, outfolder, project_root_path):
kwargs = culled_flowlines.copy()
#kwargs['demfile'] = os.path.join(test_data_path, 'meras_100m_dem.tif')
kwargs['demfile'] = os.path.join(test_data_path, 'meras_30m_dem.tif')
#kwargs['demfile'] = os.path.join(project_root_path, 'examples/meras/dem_min_elevs_1000.tif')
kwargs['dem_length_units'] = 'feet'
kwargs['narwidth_shapefile'] = os.path.join(test_data_path, 'NARwidth.shp')
kwargs['waterbody_shapefiles'] = os.path.join(test_data_path,
'NHDPlus08/NHDSnapshot/Hydrography/NHDWaterbody.shp')
kwargs['asum_thresh'] = 20.
kwargs['width_from_asum_a_param'] = 0.0592
kwargs['width_from_asum_b_param'] = 0.5127
kwargs['known_connections'] = {17955195: 17955197,
17955197: 17955185,
17954979: 17954993,
17954993: 17955075
}
kwargs['logger'] = None
kwargs['output_length_units'] = 'meters'
kwargs['outfolder'] = outfolder
kwargs['project_epsg'] = 5070
preprocessed_flowlines = preprocess_nhdplus(**kwargs)
# check that the known_connections were routed correctly
for comid, tocomid in kwargs['known_connections'].items():
assert preprocessed_flowlines.loc[comid, 'tocomid'] == tocomid
out_shapefile = os.path.join(outfolder, 'preprocessed_flowlines.shp')
df2shp(preprocessed_flowlines, out_shapefile, crs=5070)
return preprocessed_flowlines
def test_cull_flowlines(clipped_flowlines, culled_flowlines, test_data_path,
outfolder, active_area):
nhdpaths = [os.path.join(test_data_path, 'NHDPlus08')]
source_nhdfiles = [os.path.join(nhdpaths[0], 'NHDSnapshot/Hydrography/NHDFlowline.shp'),
os.path.join(nhdpaths[0], 'NHDPlusAttributes/PlusFlowlineVAA.dbf'),
os.path.join(nhdpaths[0], 'NHDPlusAttributes/PlusFlow.dbf'),
os.path.join(nhdpaths[0], 'NHDPlusAttributes/elevslope.dbf')
]
original_sizes = np.array([os.path.getsize(f) for f in source_nhdfiles])
results = clipped_flowlines
clipped_sizes = np.array([os.path.getsize(f) for f in clipped_flowlines.values()])
culled_sizes = np.array([os.path.getsize(f) for f in culled_flowlines.values()])
assert np.all(culled_sizes > 0)
assert np.all(original_sizes > 0)
assert np.all(culled_sizes <= clipped_sizes)
assert np.all(clipped_sizes <= original_sizes)
results2 = cull_flowlines(nhdpaths,
asum_thresh=None, intermittent_streams_asum_thresh=None,
cull_invalid=False, cull_isolated=False,
active_area=active_area,
outfolder=outfolder)
assert results == results2
sizes2 = np.array([os.path.getsize(f) for f in results.values()])
assert np.all(sizes2 <= original_sizes)
assert results != culled_flowlines
def test_preprocess_nhdplus(preprocessed_flowlines):
fl = preprocessed_flowlines
# check some more connections
connections = {17955689: 17955711,
17956683: 17956745
}
for comid, tocomid in connections.items():
assert fl.loc[comid, 'tocomid'] == tocomid
# these lines should have been dropped
should_have_been_dropped = {17956691, 17955223}
for comid in should_have_been_dropped:
assert comid not in fl.index
# these lines should be included
should_be_included = {17955197, 17956745}
for comid in should_be_included:
assert comid in fl.index
# check that arbolate sums at known connections are correct
assert np.allclose(fl.loc[17955197, 'asum_calc'], 650., rtol=0.1)
# check that arbolate sums increase monotonically downstream
assert check_monotonicity(fl.index, fl.tocomid, fl.asum_calc, decrease=False)
# verify that for the lines that have narwidth estimates,
# the mean narwidth width and asum widths are within 20%
# (should hopefully catch unit conversion mistakes)
has_nw = ~fl.narwd_n.isna()
    assert np.allclose(fl.loc[has_nw, 'width1asum'].mean(),
                       fl.loc[has_nw, 'narwd_mean'].mean(), rtol=0.2)
def test_preprocess_nhdplus_no_zonal_stats(culled_flowlines, preprocessed_flowlines,
test_data_path, outfolder):
kwargs = culled_flowlines.copy()
# kwargs['demfile'] = os.path.join(test_data_path, 'meras_100m_dem.tif')
kwargs['run_zonal_statistics'] = False
kwargs['flowline_elevations_file'] = Path(outfolder, 'flowlines_gt20km_buffers.shp')
kwargs['narwidth_shapefile'] = os.path.join(test_data_path, 'NARwidth.shp')
kwargs['waterbody_shapefiles'] = os.path.join(test_data_path,
'NHDPlus08/NHDSnapshot/Hydrography/NHDWaterbody.shp')
kwargs['asum_thresh'] = 20.
kwargs['width_from_asum_a_param'] = 0.0592
kwargs['width_from_asum_b_param'] = 0.5127
kwargs['known_connections'] = {17955195: 17955197,
17955197: 17955185,
17954979: 17954993,
17954993: 17955075
}
kwargs['logger'] = None
kwargs['output_length_units'] = 'meters'
kwargs['outfolder'] = outfolder
kwargs['project_epsg'] = 5070
preprocessed_flowlines2 = preprocess_nhdplus(**kwargs)
# verify that the same result is produced
# when reusing the shapefile output from zonal statistics
pd.testing.assert_frame_equal(preprocessed_flowlines, preprocessed_flowlines2)
# test manual updating of COMID end elevations
# (e.g. from measured stage data)
# assume mean stage at the Money, MS gage
# (near the upstream end of COMID 17991438)
# has been measured at 37.1 m (~1.2 meters above the min DEM elevation)
kwargs['update_up_elevations'] = {17991438: 37.1,
}
# and that mean stage near Greenwood
# (near the downstream end of COMID 18047242)
# has been measured at 37.0 m (~1.1 meters above the min DEM elevation)
kwargs['update_dn_elevations'] = {18047242: 37.0,
}
preprocessed_flowlines3 = preprocess_nhdplus(**kwargs)
assert preprocessed_flowlines3.loc[17991438, 'elevupsmo'] == 37.1
assert preprocessed_flowlines3.loc[18047242, 'elevdnsmo'] == 37.0
@pytest.mark.timeout(30) # projection issues will cause zonal stats to hang
def test_preprocess_nhdplus_no_narwidth(test_data_path, culled_flowlines, outfolder):
kwargs = culled_flowlines.copy()
kwargs['demfile'] = os.path.join(test_data_path, 'meras_100m_dem.tif')
kwargs['narwidth_shapefile'] = None
kwargs['waterbody_shapefiles'] = None
kwargs['asum_thresh'] = 20.
kwargs['known_connections'] = None
kwargs['logger'] = None
kwargs['outfolder'] = outfolder
kwargs['project_epsg'] = 5070
preprocess_nhdplus(**kwargs)
def test_clip_flowlines(preprocessed_flowlines, test_data_path):
clipped = clip_flowlines_to_polygon(preprocessed_flowlines,
os.path.join(test_data_path, 'active_area.shp'),
simplify_tol=100, logger=None)
assert len(clipped) < len(preprocessed_flowlines)
@pytest.mark.parametrize('flowlines', ('preprocessed_flowlines.shp',
None))
def test_edit_flowlines(flowlines, preprocessed_flowlines, test_data_path):
if flowlines is None:
flowlines = preprocessed_flowlines
flowline_edits_file = os.path.join(test_data_path, 'flowline_edits.yml')
edited_flowlines = edit_flowlines(flowlines,
flowline_edits_file, logger=None)
with open(flowline_edits_file) as src:
cfg = yaml.load(src, Loader=yaml.Loader)
# verify that flowlines were dropped
assert not any(set(cfg['drop_flowlines']).intersection(edited_flowlines.COMID))
# verify routing changes
for comid, tocomid in cfg['reroute_flowlines'].items():
assert edited_flowlines.loc[comid, 'tocomid'] == tocomid
add_flowlines = shp2df(os.path.join(test_data_path, 'yazoo.shp'))
assert not any(set(add_flowlines.comid).difference(edited_flowlines.index))
if isinstance(flowlines, str):
assert os.path.exists(flowlines[:-4] + '.prj')
def test_get_flowline_routing(datapath, project_root_path):
# change the directory to the root level
# (other tests in this module use the output folder)
wd = os.getcwd()
os.chdir(project_root_path)
NHDPlus_paths = [f'{datapath}/tylerforks/NHDPlus/']
plusflow_files = [f'{datapath}/tylerforks/NHDPlus/NHDPlusAttributes/PlusFlow.dbf']
mask = f'{datapath}/tylerforks/grid.shp'
df = get_flowline_routing(NHDPlus_paths=NHDPlus_paths,
mask=mask)
assert np.array_equal(df.columns, ['FROMCOMID', 'TOCOMID'])
df2 = get_flowline_routing(PlusFlow=plusflow_files)
pd.testing.assert_frame_equal(df2.loc[df2['FROMCOMID'].isin(df['FROMCOMID'])].head(),
df.head())
os.chdir(wd)
def test_swb_runoff_to_csv(test_data_path, outdir):
test_data_path = Path(test_data_path)
swb_netcdf_output = test_data_path / 'runoff__1999-01-01_to_2018-12-31__989_by_661.nc'
nhdplus_catchments_file = test_data_path / 'NHDPlus08/NHDPlusCatchment/Catchment.shp'
outfile = Path(outdir, 'swb_runoff_by_nhdplus_comid.csv')
swb_runoff_to_csv(swb_netcdf_output, nhdplus_catchments_file,
runoff_output_variable='runoff',
catchment_id_col='FEATUREID',
output_length_units='meters',
outfile=outfile)
df = pd.read_csv(outfile)
df['time'] = pd.to_datetime(df['time'])
#cat = gpd.read_file(nhdplus_catchments_file)
# model bounds
xoffset, yoffset = 500955, 1176285
nrow, ncol = 30, 35
dxy = 1000
x1 = xoffset + ncol * dxy
y1 = yoffset + nrow * dxy
within_model = (df.x.values > xoffset) & (df.x.values < x1) & \
(df.y.values > yoffset) & (df.y.values < y1)
df = df.loc[within_model]
mean_monthly_runoff = df.groupby(df['time'].dt.year)['runoff_m3d'].sum().mean()/12
# no idea if this is the right number but similar to test results for modflow-setup
# and serves as a benchmark in case the output changes
assert np.allclose(mean_monthly_runoff, 5e5, rtol=0.2)
# test with "rejected net infiltration added"
swb_rejected_net_inf_output = test_data_path / \
'irrtest_1000mrejected_net_infiltration__1999-01-01_to_2020-12-31__989_by_661.nc'
outfile2 = Path(outdir, 'swb_runoff_w_netinf_by_nhdplus_comid.csv')
swb_runoff_to_csv(swb_netcdf_output, nhdplus_catchments_file,
runoff_output_variable='runoff',
swb_rejected_net_inf_output=swb_rejected_net_inf_output,
catchment_id_col='FEATUREID',
output_length_units='meters',
outfile=outfile2)
df2 = pd.read_csv(outfile2)
df2['time'] =
|
pd.to_datetime(df2['time'])
|
pandas.to_datetime
|
import numpy as np
import pandas as pd
class PricingInstance(object):
"""Instance of the pricing problem"""
def __init__(self, rp, rn, Xp, Xn, v0, z0):
self.rp = rp
self.rn = rn
self.Xp = Xp
self.Xn = Xn
self.v0 = v0
self.z0 = z0
def eval_singletons(self, lambda1):
"""Evaluate all singleton solutions (adding one literal)"""
self.Rp = np.dot(self.rp, self.Xp)
self.Rn = np.dot(self.rn, self.Xn)
self.v1 = self.v0 - self.Rp - self.Rn + lambda1
self.v1 = pd.Series(self.v1, index=self.Xp.columns)
def compute_LB(self, lambda1):
"""Compute lower bound on higher-order solutions"""
Rp0 = self.rp.sum()
if np.ndim(lambda1):
self.LB = np.array([])
else:
self.LB = np.minimum(np.cumsum(np.sort(self.Rp)[::-1])[1:], Rp0)
self.LB += np.sort(self.Rn)[-2::-1]
self.LB -= lambda1 * np.arange(2, len(self.Rp)+1)
self.LB = self.v0 - self.LB
# Lower bound specific to each singleton solution
self.LB1 = self.v1 + self.Rp - Rp0 + lambda1
if len(self.LB):
self.LB1[self.LB1 < self.LB.min()] = self.LB.min()
def beam_search_K1(r, X, lambda0, lambda1, UB=0, D=10, B=5, wLB=0.5, eps=1e-6, stopEarly=True):
"""Beam search to generate SINGLE SOLUTION (K = 1) to pricing problem
Problem parameters:
r = cost vector (residuals)
X = binary feature DataFrame
lambda0 = fixed cost of a term
lambda1 = cost per literal
Algorithm parameters:
UB = initial upper bound on value of solutions
D = maximum degree
B = beam width
wLB = weight on lower bound in evaluating nodes
eps = numerical tolerance on comparisons
stopEarly = stop after current degree once solution is found
"""
# Initialize output
vOut = np.array([])
zOut = pd.Series(index=X.columns)
# Initialize queue with root instance
# Separate data according to positive and negative residuals
rp = r[r > 0]
rn = r[r < 0]
Xp = 1 - X.loc[r > 0]
Xn = 1 - X.loc[r < 0]
instCurr = [PricingInstance(rp, rn, Xp, Xn, r.sum() + lambda0, pd.Series(0, index=zOut.index))]
# Iterate over increasing degree while queue is non-empty
deg = 0
while (not len(vOut) or not stopEarly) and len(instCurr) and deg < D:
deg += 1
# Initialize list of children to process
vNext = np.array([])
vNextMax = np.inf
zNext = pd.DataFrame([], index=X.columns)
idxInstNext = np.array([], dtype=int)
idxFeatNext = np.array([])
# Process instances in queue
for (idxInst, inst) in enumerate(instCurr):
# Evaluate all singleton solutions
inst.eval_singletons(lambda1)
# Solutions that improve on current output
vCand = inst.v1[inst.v1 < UB - eps]
if len(vCand):
# Update output with best of these solutions
idxMin = vCand.idxmin()
UB = vCand[idxMin]
vOut = np.array([UB])
zOut = inst.z0.copy()
zOut[idxMin] = 1
# Compute lower bounds on higher-degree solutions
inst.compute_LB(lambda1)
# Evaluate children using weighted average of their costs and LBs
vChild = (1 - wLB) * inst.v1 + wLB * inst.LB1
# Best children with potential to improve on current output and current candidates (allow for duplicate removal)
vChild = vChild[(inst.LB1 < UB - eps) & (vChild < vNextMax - eps)].sort_values()[:B+idxInst]
if len(vChild):
# Feature indicators of these best children
zChild = pd.DataFrame(zOut.index.values[:,np.newaxis] == vChild.index.values, index=zOut.index).astype(int)
zChild = zChild.add(inst.z0, axis=0)
# Append to current candidates
vNext = np.append(vNext, vChild.values)
zNext = pd.concat([zNext, zChild], axis=1, ignore_index=True)
idxInstNext = np.append(idxInstNext, np.full(B+idxInst, idxInst))
idxFeatNext = np.append(idxFeatNext, vChild.index.values)
# Remove duplicates
_, idxUniq = np.unique(zNext, return_index=True, axis=1)
vNext = vNext[idxUniq]
zNext = zNext.iloc[:,idxUniq]
idxInstNext = idxInstNext[idxUniq]
idxFeatNext = idxFeatNext[idxUniq]
# Update candidates
idxBest = np.argsort(vNext)[:B]
vNext = vNext[idxBest]
if len(vNext):
vNextMax = vNext[-1]
zNext = zNext.iloc[:,idxBest]
zNext.columns = range(zNext.shape[1])
idxInstNext = idxInstNext[idxBest]
idxFeatNext = idxFeatNext[idxBest]
# Instances to process in next iteration
instNext = []
for (idxInst, i, idxz) in zip(idxInstNext, idxFeatNext, zNext):
# Create pricing instance
# Remove covered rows
rowKeep = instCurr[idxInst].Xp[i] == 0
rp = instCurr[idxInst].rp[rowKeep]
Xp = instCurr[idxInst].Xp.loc[rowKeep]
rowKeep = instCurr[idxInst].Xn[i] == 0
rn = instCurr[idxInst].rn[rowKeep]
Xn = instCurr[idxInst].Xn.loc[rowKeep]
# Remove redundant features
if type(Xp.columns) is pd.MultiIndex:
colKeep = pd.Series(Xp.columns.get_level_values(0) != i[0], index=Xp.columns)
if i[1] == '<=':
thresh = Xp[i[0]].columns.get_level_values(1).to_series().replace('NaN', np.nan).values
colKeep[i[0]] = (Xp[i[0]].columns.get_level_values(0) == '>') & (thresh < i[2])
elif i[1] == '>':
thresh = Xp[i[0]].columns.get_level_values(1).to_series().replace('NaN', np.nan).values
colKeep[i[0]] = (Xp[i[0]].columns.get_level_values(0) == '<=') & (thresh > i[2])
elif i[1] == '!=':
colKeep[i[0]] = (Xp[i[0]].columns.get_level_values(0) == '!=') & (Xp[i[0]].columns.get_level_values(1) != i[2])
Xp = Xp.loc[:, colKeep]
Xn = Xn.loc[:, colKeep]
instNext.append(PricingInstance(rp, rn, Xp, Xn, instCurr[idxInst].v1[i], zNext[idxz]))
instCurr = instNext
# Conjunctions corresponding to solutions
if zOut.count():
zOut =
|
pd.DataFrame(zOut)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import sys
plt.xlabel("Elapsed time [sec]")
plt.ylabel("Accuracy [%]")
TESTNAME = sys.argv[1]
TESTSEC = sys.argv[2]
nodes = [4, 6, 8, 10]
colors = ["b", "g", "r", "c"]
for node, color in zip(nodes, colors):
filename = "./{}/{}-{}/good.csv".format(TESTNAME, node, TESTSEC)
df = pd.read_csv(filename, header=None, names=('time', 'accuracy', 'loss'))
df["time"] = pd.to_datetime(df['time'])
ini = df["time"][0]
df = df.drop(index=0)
df["accuracy"] =
|
pd.to_numeric(df["accuracy"])
|
pandas.to_numeric
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Feature selection by means of Feature Analysis, which includes PFA and IFA.
------------------------------------------------------------------------------------------------------------------------
References:
- <NAME>, <NAME>, <NAME>, and <NAME>, "Feature selection using principal feature analysis," in Proceedings of
the 15th international conference on Multimedia. ACM, 2007, pp. 301-304.
------------------------------------------------------------------------------------------------------------------------
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
# TODO: PFA
# TODO: IFA
import argparse
import sys
import warnings
import time
import numpy as np
import pandas as pd
from sklearn.preprocessing import scale
from terminaltables import DoubleTable
from kavica.imputation.base import data_structure_Compatibilization
from kavica.distance_measure import euclideanDistance
from sklearn.cluster import KMeans
import scipy.cluster.hierarchy as sch
from sklearn.cluster import AgglomerativeClustering
from sklearn import decomposition
from kavica.factor_analysis.factor_rotation import ObliqueRotation
import json
__all__ = ['has_fitted',
'sort_parja',
'_centroid',
'__configoration',
'_BaseFeatureAnalysis',
'PrincipalFeatureAnalysis',
'IndependentFeatureAnalysis']
def has_fitted(estimator, attributes, msg=None, all_or_any=any):
pass
def sort_parja(x, y, order=-1):
# TODO: parameter check (numpy array)
index = np.array(x).argsort(kind='quicksort')
return (np.array(x)[index][::order], np.array(y)[index][::order])
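# Illustrative sketch (hypothetical input): with the default order=-1 both
# arrays come back sorted by descending x, e.g.
# >>> sort_parja([3, 1, 2], ['a', 'b', 'c'])
# (array([3, 2, 1]), array(['a', 'c', 'b'], dtype='<U1'))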
# TODO: it is needed to rewrite it with method parameter
def _centroid(x, label):
datafreamX = pd.DataFrame(x)
datafreamX['label'] = label
return datafreamX.groupby('label').mean()
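# Illustrative sketch (hypothetical input): _centroid([[0, 0], [2, 2], [10, 10]], [0, 0, 1])
# should return a two-row frame of per-label means, roughly
# label 0 -> [1.0, 1.0] and label 1 -> [10.0, 10.0].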
# read the configuration file for preparing the features
def __configoration(config, data):
# read the configuration file
with open(config, 'r') as config:
config_dict = json.load(config)
# Read the data file
df = pd.read_csv(data)
# config the data set based on configuration information
df = df[list(config_dict['hardware_counters'].values())] # sub set of features
df.replace([np.inf, -np.inf], np.nan, inplace=True)
lastShape = df.shape
# Remove the all zero rows
df = df[(df.T != 0).any()]
print("The {} row are full null that are eliminated.".format(lastShape[0] - df.shape[0]))
lastShape = df.shape
# Remove all NaN columns.
    df = df.loc[:, pd.notnull(df).any()]
print("The {} columns are full null that are eliminated.".format(lastShape[1] - df.shape[1]))
if config_dict['missing_values'] == 'mean':
df.fillna(df.mean(), inplace=True)
if config_dict['scale']:
df = pd.DataFrame(scale(df), index=df.index, columns=df.columns)
print(df.mean(axis=0), df.std(axis=0))
return df
def arguments_parser():
# set/receive the arguments
if len(sys.argv) == 1:
# It is used for testing and developing time.
arguments = ['config/config_FS_gromacs_64p_INS_CYC.json',
'../parser/source.csv',
'-k',
'2',
'-m',
'IFA'
]
sys.argv.extend(arguments)
else:
pass
# parse the arguments
    parser = argparse.ArgumentParser(description='The files that are needed for selecting the most important features.')
    parser.add_argument('config', help='A .json configuration file that includes the '
                                       'thread numbers, hardware counters, etc.')
parser.add_argument('csvfile', help='A .csv dataset file')
parser.add_argument('-k',
dest='k',
default=2,
action='store',
type=int,
help="It significances the number of the most important features.")
parser.add_argument('-m',
dest='m',
default='IFA',
choices=['IFA', 'PFA'],
action='store',
type=str.upper,
help="The feature selection method that is either IFA or PFA.")
args = parser.parse_args()
if args.k < 2:
raise ValueError("Selected features have to be (=> 2). It is set {}".format(args.k))
return ({"configPath": args.config,
"csvPath": args.csvfile,
"k_features": args.k,
"featureSelectionMethod": args.m})
######################################################################
# Base class
######################################################################
class _BaseFeatureAnalysis(object):
"""Initialize the feature analysis.
Parameters
"""
def __init__(self, X=None, method=None, k_features=None):
self.hasFitted = False
self.originData = X
self.k_features = k_features
self.featureScore = {'method': method,
'scores':
|
pd.DataFrame(columns=['features', 'subset', 'internal_score'])
|
pandas.DataFrame
|
"""Model evaluation tools."""
import os
import sklearn
import itertools
import numpy as np
import pandas as pd
import sklearn.metrics as skmetrics
from matplotlib import pyplot as plt
from healthcareai.common.healthcareai_error import HealthcareAIError
DIAGONAL_LINE_COLOR = '#bbbbbb'
DIAGONAL_LINE_STYLE = 'dotted'
def compute_roc(y_test, probability_predictions):
"""
Compute TPRs, FPRs, best cutoff, ROC auc, and raw thresholds.
Args:
y_test (list) : true label values corresponding to the predictions. Also length n.
probability_predictions (list) : predictions coming from an ML algorithm of length n.
Returns:
dict:
"""
_validate_predictions_and_labels_are_equal_length(probability_predictions, y_test)
# Calculate ROC
false_positive_rates, true_positive_rates, roc_thresholds = skmetrics.roc_curve(y_test, probability_predictions)
roc_auc = skmetrics.roc_auc_score(y_test, probability_predictions)
# get ROC ideal cutoffs (upper left, or 0,1)
roc_distances = (false_positive_rates - 0) ** 2 + (true_positive_rates - 1) ** 2
# To prevent the case where there are two points with the same minimum distance, return only the first
# np.where returns a tuple (we want the first element in the first array)
roc_index = np.where(roc_distances == np.min(roc_distances))[0][0]
best_tpr = true_positive_rates[roc_index]
best_fpr = false_positive_rates[roc_index]
ideal_roc_cutoff = roc_thresholds[roc_index]
return {'roc_auc': roc_auc,
'best_roc_cutoff': ideal_roc_cutoff,
'best_true_positive_rate': best_tpr,
'best_false_positive_rate': best_fpr,
'true_positive_rates': true_positive_rates,
'false_positive_rates': false_positive_rates,
'roc_thresholds': roc_thresholds}
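# Illustrative usage sketch for compute_roc on made-up labels and scores (it assumes
# the module's length-validation helper is available, as in the full file). It shows
# how the returned dictionary exposes the AUC and the cutoff whose (FPR, TPR) point
# lies closest to the ideal upper-left corner (0, 1).
def _example_compute_roc():
    y_true = [0, 0, 1, 1]
    scores = [0.1, 0.4, 0.35, 0.8]
    roc = compute_roc(y_true, scores)
    # For this toy data roc['roc_auc'] is 0.75; roc['best_roc_cutoff'] is the
    # threshold whose point is nearest the (0, 1) corner.
    return roc['roc_auc'], roc['best_roc_cutoff']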
def compute_pr(y_test, probability_predictions):
"""
Compute Precision-Recall, thresholds and PR AUC.
Args:
y_test (list) : true label values corresponding to the predictions. Also length n.
probability_predictions (list) : predictions coming from an ML algorithm of length n.
Returns:
dict:
"""
_validate_predictions_and_labels_are_equal_length(probability_predictions, y_test)
# Calculate PR
precisions, recalls, pr_thresholds = skmetrics.precision_recall_curve(y_test, probability_predictions)
pr_auc = skmetrics.average_precision_score(y_test, probability_predictions)
# get ideal cutoffs for suggestions (upper right or 1,1)
pr_distances = (precisions - 1) ** 2 + (recalls - 1) ** 2
# To prevent the case where there are two points with the same minimum distance, return only the first
# np.where returns a tuple (we want the first element in the first array)
pr_index = np.where(pr_distances == np.min(pr_distances))[0][0]
best_precision = precisions[pr_index]
best_recall = recalls[pr_index]
ideal_pr_cutoff = pr_thresholds[pr_index]
return {'pr_auc': pr_auc,
'best_pr_cutoff': ideal_pr_cutoff,
'best_precision': best_precision,
'best_recall': best_recall,
'precisions': precisions,
'recalls': recalls,
'pr_thresholds': pr_thresholds}
def calculate_regression_metrics(trained_sklearn_estimator, x_test, y_test):
"""
Given a trained estimator, calculate metrics.
Args:
trained_sklearn_estimator (sklearn.base.BaseEstimator): a scikit-learn estimator that has been `.fit()`
        y_test (numpy.ndarray): A 1d numpy array of the y_test set (true values)
x_test (numpy.ndarray): A 2d numpy array of the x_test set (features)
Returns:
dict: A dictionary of metrics objects
"""
# Get predictions
predictions = trained_sklearn_estimator.predict(x_test)
# Calculate individual metrics
mean_squared_error = skmetrics.mean_squared_error(y_test, predictions)
mean_absolute_error = skmetrics.mean_absolute_error(y_test, predictions)
result = {'mean_squared_error': mean_squared_error, 'mean_absolute_error': mean_absolute_error}
return result
def calculate_binary_classification_metrics(trained_sklearn_estimator, x_test, y_test):
"""
Given a trained estimator, calculate metrics.
Args:
trained_sklearn_estimator (sklearn.base.BaseEstimator): a scikit-learn estimator that has been `.fit()`
x_test (numpy.ndarray): A 2d numpy array of the x_test set (features)
        y_test (numpy.ndarray): A 1d numpy array of the y_test set (true values)
Returns:
dict: A dictionary of metrics objects
"""
# Squeeze down y_test to 1D
y_test = np.squeeze(y_test)
_validate_predictions_and_labels_are_equal_length(x_test, y_test)
# Get binary and probability classification predictions
binary_predictions = np.squeeze(trained_sklearn_estimator.predict(x_test))
probability_predictions = np.squeeze(trained_sklearn_estimator.predict_proba(x_test)[:, 1])
# Calculate accuracy
accuracy = skmetrics.accuracy_score(y_test, binary_predictions)
roc = compute_roc(y_test, probability_predictions)
pr = compute_pr(y_test, probability_predictions)
# Unpack the roc and pr dictionaries so the metric lookup is easier for plot and ensemble methods
return {'accuracy': accuracy, **roc, **pr}
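# Illustrative sketch: wiring a freshly fitted scikit-learn estimator into
# calculate_binary_classification_metrics (assuming the module's validation helper
# is present, as in the full file). The tiny dataset is made up; any classifier
# exposing predict() and predict_proba() should work the same way.
def _example_binary_metrics():
    from sklearn.linear_model import LogisticRegression
    x = np.array([[0.0], [0.2], [0.4], [0.6], [0.8], [1.0]])
    y = np.array([0, 0, 0, 1, 1, 1])
    model = LogisticRegression().fit(x, y)
    metrics = calculate_binary_classification_metrics(model, x, y)
    # The dict mixes plain metrics ('accuracy') with the unpacked ROC/PR entries,
    # e.g. 'roc_auc', 'best_roc_cutoff', 'pr_auc'.
    return metrics['accuracy'], metrics['roc_auc']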
def roc_plot_from_thresholds(roc_thresholds_by_model, save=False, debug=False):
"""
From a given dictionary of thresholds by model, create a ROC curve for each model.
Args:
roc_thresholds_by_model (dict): A dictionary of ROC thresholds by model name.
save (bool): False to display the image (default) or True to save it (but not display it)
        debug (bool): verbose output.
"""
# TODO consolidate this and PR plotter into 1 function
# TODO make the colors randomly generated from rgb values
# Cycle through the colors list
color_iterator = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
# Initialize plot
plt.figure()
plt.xlabel('False Positive Rate (FPR)')
    plt.ylabel('True Positive Rate (TPR)')
plt.title('Receiver Operating Characteristic (ROC)')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.plot([0, 1], [0, 1], linestyle=DIAGONAL_LINE_STYLE, color=DIAGONAL_LINE_COLOR)
# Calculate and plot for each model
for color, (model_name, metrics) in zip(color_iterator, roc_thresholds_by_model.items()):
# Extract model name and metrics from dictionary
roc_auc = metrics['roc_auc']
tpr = metrics['true_positive_rates']
fpr = metrics['false_positive_rates']
best_true_positive_rate = metrics['best_true_positive_rate']
best_false_positive_rate = metrics['best_false_positive_rate']
if debug:
print('{} model:'.format(model_name))
print(pd.DataFrame({'FPR': fpr, 'TPR': tpr}))
# plot the line
label = '{} (ROC AUC = {})'.format(model_name, round(roc_auc, 2))
plt.plot(fpr, tpr, color=color, label=label)
plt.plot([best_false_positive_rate], [best_true_positive_rate], marker='*', markersize=10, color=color)
plt.legend(loc="lower right")
if save:
plt.savefig('ROC.png')
source_path = os.path.dirname(os.path.abspath(__file__))
print('\nROC plot saved in: {}'.format(source_path))
plt.show()
def pr_plot_from_thresholds(pr_thresholds_by_model, save=False, debug=False):
"""
From a given dictionary of thresholds by model, create a PR curve for each model.
Args:
pr_thresholds_by_model (dict): A dictionary of PR thresholds by model name.
save (bool): False to display the image (default) or True to save it (but not display it)
        debug (bool): verbose output.
"""
# TODO consolidate this and PR plotter into 1 function
# TODO make the colors randomly generated from rgb values
# Cycle through the colors list
color_iterator = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
# Initialize plot
plt.figure()
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall (PR)')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.plot([0, 1], [1, 0], linestyle=DIAGONAL_LINE_STYLE, color=DIAGONAL_LINE_COLOR)
# Calculate and plot for each model
for color, (model_name, metrics) in zip(color_iterator, pr_thresholds_by_model.items()):
# Extract model name and metrics from dictionary
pr_auc = metrics['pr_auc']
precision = metrics['precisions']
recall = metrics['recalls']
best_recall = metrics['best_recall']
best_precision = metrics['best_precision']
if debug:
print('{} model:'.format(model_name))
            print(pd.DataFrame({'Recall': recall, 'Precision': precision}))
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
# load data
# data set used: https://www.kaggle.com/ronitf/heart-disease-uci
# help with RandomizedSearchCV: https://towardsdatascience.com/hyperparameter-tuning-the-random-forest-in-python-using-scikit-learn-28d2aa77dd74
heart_df = pd.read_csv('framingham.csv', sep=',')
"""
Packages to use :
tsfresh
tsfel https://tsfel.readthedocs.io/en/latest/
sktime
feature tools : https://docs.featuretools.com/en/stable/automated_feature_engineering/handling_time.html
Cesium http://cesium-ml.org/docs/feature_table.html
Feature Tools for advanced features: https://github.com/Featuretools/predict-remaining-useful-life/blob/master/Advanced%20Featuretools%20RUL.ipynb
"""
import pandas as pd
import tsfresh
from tsfresh import extract_relevant_features, extract_features
import numpy as np
import pdb
import re
def features_time_basic(df, input_raw_path = None, dir_out = None, features_group_name = None, auxiliary_csv_path = None, drop_cols = None, index_cols = None, merge_cols_mapping = None, cat_cols = None, id_cols = None, dep_col = None, max_rows = 10):
df['date_t'] = pd.to_datetime(df['date'])
df['year'] = df['date_t'].dt.year
df['month'] = df['date_t'].dt.month
df['week'] = df['date_t'].dt.week
df['day'] = df['date_t'].dt.day
df['dayofweek'] = df['date_t'].dt.dayofweek
cat_cols = []
return df[['year', 'month', 'week', 'day', 'dayofweek'] + id_cols], cat_cols
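# Illustrative usage sketch for features_time_basic on a made-up frame (it assumes
# a pandas version where Series.dt.week is still available, as the function above
# relies on it). Only the 'date' column and the id_cols carried into the output matter.
def _example_features_time_basic():
    df = pd.DataFrame({'date': ['2020-01-01', '2020-01-15', '2020-02-01'],
                       'item_id': ['a', 'b', 'c']})
    out, cat_cols = features_time_basic(df, id_cols=['item_id'])
    # out holds year/month/week/day/dayofweek plus the id columns; cat_cols is [].
    return out, cat_cols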
def features_lag(df, fname):
out_df = df[['item_id', 'dept_id', 'cat_id', 'store_id', 'state_id']]
###############################################################################
# day lag 29~57 day and last year's day lag 1~28 day
day_lag = df.iloc[:,-28:]
day_year_lag = df.iloc[:,-393:-365]
day_lag.columns = [str("lag_{}_day".format(i)) for i in range(29,57)] # Rename columns
day_year_lag.columns = [str("lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# Rolling mean(3) and (7) and (28) and (84) 29~57 day and last year's day lag 1~28 day
rolling_3 = df.iloc[:,-730:].T.rolling(3).mean().T.iloc[:,-28:]
rolling_3.columns = [str("rolling3_lag_{}_day".format(i)) for i in range(29,57)] # Rename columns
rolling_3_year = df.iloc[:,-730:].T.rolling(3).mean().T.iloc[:,-393:-365]
rolling_3_year.columns = [str("rolling3_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
rolling_7 = df.iloc[:,-730:].T.rolling(7).mean().T.iloc[:,-28:]
rolling_7.columns = [str("rolling7_lag_{}_day".format(i)) for i in range(29,57)] # Rename columns
rolling_7_year = df.iloc[:,-730:].T.rolling(7).mean().T.iloc[:,-393:-365]
rolling_7_year.columns = [str("rolling7_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
rolling_28 = df.iloc[:,-730:].T.rolling(28).mean().T.iloc[:,-28:]
rolling_28.columns = [str("rolling28_lag_{}_day".format(i)) for i in range(29,57)]
rolling_28_year = df.iloc[:,-730:].T.rolling(28).mean().T.iloc[:,-393:-365]
rolling_28_year.columns = [str("rolling28_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
rolling_84 = df.iloc[:,-730:].T.rolling(84).mean().T.iloc[:,-28:]
rolling_84.columns = [str("rolling84_lag_{}_day".format(i)) for i in range(29,57)]
rolling_84_year = df.iloc[:,-730:].T.rolling(84).mean().T.iloc[:,-393:-365]
rolling_84_year.columns = [str("rolling84_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# monthly lag 1~18 month
month_lag = pd.DataFrame({})
for i in range(1,19):
if i == 1:
monthly = df.iloc[:,-28*i:].T.sum().T
month_lag["monthly_lag_{}_month".format(i)] = monthly
else:
monthly = df.iloc[:, -28*i:-28*(i-1)].T.sum().T
month_lag["monthly_lag_{}_month".format(i)] = monthly
# combine day lag and monthly lag
out_df = pd.concat([out_df, day_lag], axis=1)
out_df = pd.concat([out_df, day_year_lag], axis=1)
out_df = pd.concat([out_df, rolling_3], axis=1)
out_df = pd.concat([out_df, rolling_3_year], axis=1)
out_df = pd.concat([out_df, rolling_7], axis=1)
out_df = pd.concat([out_df, rolling_7_year], axis=1)
out_df = pd.concat([out_df, rolling_28], axis=1)
out_df = pd.concat([out_df, rolling_28_year], axis=1)
out_df = pd.concat([out_df, rolling_84], axis=1)
out_df = pd.concat([out_df, rolling_84_year], axis=1)
out_df = pd.concat([out_df, month_lag], axis=1)
###############################################################################
# dept_id
group_dept = df.groupby("dept_id").sum()
# day lag 29~57 day and last year's day lag 1~28 day
dept_day_lag = group_dept.iloc[:,-28:]
dept_day_year_lag = group_dept.iloc[:,-393:-365]
dept_day_lag.columns = [str("dept_lag_{}_day".format(i)) for i in range(29,57)]
dept_day_year_lag.columns = [str("dept_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# monthly lag 1~18 month
month_dept_lag = pd.DataFrame({})
for i in range(1,19):
if i == 1:
monthly_dept = group_dept.iloc[:,-28*i:].T.sum().T
month_dept_lag["dept_monthly_lag_{}_month".format(i)] = monthly_dept
elif i >= 7 and i < 13:
continue
else:
            monthly_dept = group_dept.iloc[:, -28*i:-28*(i-1)].T.sum().T
            month_dept_lag["dept_monthly_lag_{}_month".format(i)] = monthly_dept
# combine out df
out_df = pd.merge(out_df, dept_day_lag, left_on="dept_id", right_index=True, how="left")
out_df = pd.merge(out_df, dept_day_year_lag, left_on="dept_id", right_index=True, how="left")
out_df = pd.merge(out_df, month_dept_lag, left_on="dept_id", right_index=True, how="left")
###############################################################################
# cat_id
group_cat = df.groupby("cat_id").sum()
# day lag 29~57 day and last year's day lag 1~28 day
cat_day_lag = group_cat.iloc[:,-28:]
cat_day_year_lag = group_cat.iloc[:,-393:-365]
cat_day_lag.columns = [str("cat_lag_{}_day".format(i)) for i in range(29,57)]
cat_day_year_lag.columns = [str("cat_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# monthly lag 1~18 month
month_cat_lag = pd.DataFrame({})
for i in range(1,19):
if i == 1:
monthly_cat = group_cat.iloc[:,-28*i:].T.sum().T
month_cat_lag["cat_monthly_lag_{}_month".format(i)] = monthly_cat
elif i >= 7 and i < 13:
continue
else:
monthly_cat = group_cat.iloc[:, -28*i:-28*(i-1)].T.sum().T
month_cat_lag["dept_monthly_lag_{}_month".format(i)] = monthly_cat
# combine out df
out_df = pd.merge(out_df, cat_day_lag, left_on="cat_id", right_index=True, how="left")
out_df = pd.merge(out_df, cat_day_year_lag, left_on="cat_id", right_index=True, how="left")
out_df = pd.merge(out_df, month_cat_lag, left_on="cat_id", right_index=True, how="left")
###############################################################################
# store_id
group_store = df.groupby("store_id").sum()
# day lag 29~57 day and last year's day lag 1~28 day
store_day_lag = group_store.iloc[:,-28:]
store_day_year_lag = group_store.iloc[:,-393:-365]
store_day_lag.columns = [str("store_lag_{}_day".format(i)) for i in range(29,57)]
store_day_year_lag.columns = [str("store_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# monthly lag 1~18 month
month_store_lag = pd.DataFrame({})
for i in range(1,19):
if i == 1:
monthly_store = group_store.iloc[:,-28*i:].T.sum().T
month_store_lag["store_monthly_lag_{}_month".format(i)] = monthly_store
elif i >= 7 and i <13:
continue
else:
monthly_store = group_store.iloc[:, -28*i:-28*(i-1)].T.sum().T
month_store_lag["store_monthly_lag_{}_month".format(i)] = monthly_store
# combine out df
out_df = pd.merge(out_df, store_day_lag, left_on="store_id", right_index=True, how="left")
out_df = pd.merge(out_df, store_day_year_lag, left_on="store_id", right_index=True, how="left")
out_df = pd.merge(out_df, month_store_lag, left_on="store_id", right_index=True, how="left")
###############################################################################
# state_id
group_state = df.groupby("state_id").sum()
# day lag 29~57 day and last year's day lag 1~28 day
state_day_lag = group_state.iloc[:,-28:]
state_day_year_lag = group_state.iloc[:,-393:-365]
state_day_lag.columns = [str("state_lag_{}_day".format(i)) for i in range(29,57)]
state_day_year_lag.columns = [str("state_lag_{}_day_of_last_year".format(i)) for i in range(1,29)]
# monthly lag 1~18 month
month_state_lag = pd.DataFrame({})
    for i in range(1,19):
if i == 1:
monthly_state = group_state.iloc[:,-28*i:].T.sum().T
month_state_lag["state_monthly_lag_{}_month".format(i)] = monthly_state
elif i >= 7 and i < 13:
continue
else:
monthly_state = group_state.iloc[:, -28*i:-28*(i-1)].T.sum().T
month_state_lag["state_monthly_lag_{}_month".format(i)] = monthly_state
# combine out df
out_df = pd.merge(out_df, state_day_lag, left_on="state_id", right_index=True, how="left")
out_df = pd.merge(out_df, state_day_year_lag, left_on="state_id", right_index=True, how="left")
out_df = pd.merge(out_df, month_state_lag, left_on="state_id", right_index=True, how="left")
###############################################################################
# category flag
col_list = ['dept_id', 'cat_id', 'store_id', 'state_id']
df_cate_oh = pd.DataFrame({})
for i in col_list:
df_oh = pd.get_dummies(df[i])
df_cate_oh = pd.concat([df_cate_oh, df_oh], axis=1)
    out_df = pd.concat([out_df, df_cate_oh], axis=1)
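# Illustrative sketch of the iloc-based lag construction used in features_lag: the
# last 28 wide columns become the "29-57 day" lags, and a transposed rolling mean
# produces the smoothed versions. The toy frame (60 daily columns) is made up; the
# real data has 700+ day columns.
def _example_lag_slices():
    days = pd.DataFrame(np.arange(3 * 60).reshape(3, 60),
                        columns=["d_{}".format(i) for i in range(1, 61)])
    day_lag = days.iloc[:, -28:]
    day_lag.columns = ["lag_{}_day".format(i) for i in range(29, 57)]
    rolling_7 = days.T.rolling(7).mean().T.iloc[:, -28:]
    rolling_7.columns = ["rolling7_lag_{}_day".format(i) for i in range(29, 57)]
    return pd.concat([day_lag, rolling_7], axis=1)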
import functools
import numpy as np
import scipy
import scipy.linalg
import scipy
import scipy.sparse as sps
import scipy.sparse.linalg as spsl
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import logging
import tables as tb
import os
import sandy
import pytest
pd.options.display.float_format = '{:.5e}'.format
__author__ = "<NAME>"
__all__ = [
"CategoryCov",
"EnergyCov",
"triu_matrix",
"corr2cov",
"random_corr",
"random_cov",
"sample_distribution",
]
S = np.array([[1, 1, 1],
[1, 2, 1],
[1, 3, 1]])
var = np.array([[0, 0, 0],
[0, 2, 0],
[0, 0, 3]])
minimal_covtest = pd.DataFrame(
[[9437, 2, 1e-2, 9437, 2, 1e-2, 0.02],
[9437, 2, 2e5, 9437, 2, 2e5, 0.09],
[9437, 2, 1e-2, 9437, 102, 1e-2, 0.04],
[9437, 2, 2e5, 9437, 102, 2e5, 0.05],
[9437, 102, 1e-2, 9437, 102, 1e-2, 0.01],
[9437, 102, 2e5, 9437, 102, 2e5, 0.01]],
columns=["MAT", "MT", "E", "MAT1", "MT1", 'E1', "VAL"]
)
def cov33csv(func):
def inner(*args, **kwargs):
key = "<KEY>"
kw = kwargs.copy()
if key in kw:
if kw[key]:
print(f"found argument '{key}', ignore oher arguments")
out = func(
*args,
index_col=[0, 1, 2],
header=[0, 1, 2],
)
out.index.names = ["MAT", "MT", "E"]
out.columns.names = ["MAT", "MT", "E"]
return out
else:
del kw[key]
out = func(*args, **kw)
return out
return inner
class _Cov(np.ndarray):
"""Covariance matrix treated as a `numpy.ndarray`.
Methods
-------
corr
extract correlation matrix
corr2cov
produce covariance matrix given correlation matrix and standard
deviation array
eig
get covariance matrix eigenvalues and eigenvectors
get_L
decompose and extract lower triangular matrix
sampling
draw random samples
"""
def __new__(cls, arr):
obj = np.ndarray.__new__(cls, arr.shape, float)
obj[:] = arr[:]
if not obj.ndim == 2:
raise sandy.Error("covariance matrix must have two dimensions")
if not np.allclose(obj, obj.T):
raise sandy.Error("covariance matrix must be symmetric")
if (np.diag(arr) < 0).any():
raise sandy.Error("covariance matrix must have positive variances")
return obj
@staticmethod
def _up2down(self):
U = np.triu(self)
L = np.triu(self, 1).T
C = U + L
return C
def eig(self):
"""
Extract eigenvalues and eigenvectors.
Returns
-------
`Pandas.Series`
real part of eigenvalues sorted in descending order
`np.array`
matrix of eigenvectors
"""
E, V = scipy.linalg.eig(self)
E, V = E.real, V.real
return E, V
def corr(self):
"""Extract correlation matrix.
.. note:: zeros on the covariance matrix diagonal are translated
            into zeros also on the correlation matrix diagonal.
Returns
-------
`sandy.formats.utils.Cov`
correlation matrix
"""
std = np.sqrt(np.diag(self))
with np.errstate(divide='ignore', invalid='ignore'):
coeff = np.true_divide(1, std)
coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN
corr = np.multiply(np.multiply(self.T, coeff).T, coeff)
return self.__class__(corr)
def _reduce_size(self):
"""
Reduces the size of the matrix, erasing the null values.
Returns
-------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
"""
nonzero_idxs = np.flatnonzero(np.diag(self))
cov_reduced = self[nonzero_idxs][:, nonzero_idxs]
return nonzero_idxs, cov_reduced
@classmethod
def _restore_size(cls, nonzero_idxs, cov_reduced, dim):
"""
Restore the size of the matrix
Parameters
----------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
dim : int
Dimension of the original matrix.
Returns
-------
cov : sandy.core.cov._Cov
Matrix of specified dimensions.
"""
cov = _Cov(np.zeros((dim, dim)))
for i, ni in enumerate(nonzero_idxs):
cov[ni, nonzero_idxs] = cov_reduced[i]
return cov
def sampling(self, nsmp, seed=None):
"""
Extract random samples from the covariance matrix, either using
the cholesky or the eigenvalue decomposition.
Parameters
----------
nsmp : `int`
number of samples
seed : `int`
seed for the random number generator (default is `None`)
Returns
-------
`np.array`
2D array of random samples with dimension `(self.shape[0], nsmp)`
"""
dim = self.shape[0]
np.random.seed(seed=seed)
y = np.random.randn(dim, nsmp)
nonzero_idxs, cov_reduced = self._reduce_size()
L_reduced = cov_reduced.get_L()
L = self.__class__._restore_size(nonzero_idxs, L_reduced, dim)
samples = np.array(L.dot(y))
return samples
def get_L(self):
"""
Extract lower triangular matrix `L` for which `L*L^T == self`.
Returns
-------
`np.array`
lower triangular matrix
"""
try:
L = scipy.linalg.cholesky(
self,
lower=True,
overwrite_a=False,
check_finite=False
)
except np.linalg.linalg.LinAlgError:
E, V = self.eig()
E[E <= 0] = 0
Esqrt = np.diag(np.sqrt(E))
M = V.dot(Esqrt)
Q, R = scipy.linalg.qr(M.T)
L = R.T
return L
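# Illustrative sketch (module-level, not part of the original class): a round trip
# through _Cov.get_L and _Cov.sampling on a small made-up matrix, checking that the
# factor reconstructs the covariance and that the sample covariance converges to it.
def _example_cov_sampling():
    c = _Cov(np.array([[1.0, 0.4], [0.4, 1.0]]))
    L = c.get_L()
    # L is lower triangular and L @ L.T reproduces the covariance matrix.
    assert np.allclose(L.dot(L.T), c)
    smp = c.sampling(100000, seed=1)
    # The empirical covariance of the samples approaches the input matrix.
    return np.cov(smp)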
class CategoryCov():
"""
Properties
----------
data
covariance matrix as a dataframe
size
first dimension of the covariance matrix
Methods
-------
corr2cov
create a covariance matrix given a correlation matrix and a standard
deviation vector
from_stack
create a covariance matrix from a stacked `pd.DataFrame`
from_stdev
construct a covariance matrix from a stdev vector
from_var
construct a covariance matrix from a variance vector
get_corr
extract correlation matrix from covariance matrix
get_eig
extract eigenvalues and eigenvectors from covariance matrix
get_L
extract lower triangular matrix such that $C=L L^T$
get_std
extract standard deviations from covariance matrix
invert
calculate the inverse of the matrix
sampling
extract perturbation coefficients according to chosen distribution
and covariance matrix
"""
def __repr__(self):
return self.data.__repr__()
def __init__(self, *args, **kwargs):
self.data = pd.DataFrame(*args, **kwargs)
@property
def data(self):
"""
Covariance matrix as a dataframe.
Attributes
----------
index : `pandas.Index` or `pandas.MultiIndex`
indices
columns : `pandas.Index` or `pandas.MultiIndex`
columns
values : `numpy.array`
covariance values as `float`
Returns
-------
`pandas.DataFrame`
covariance matrix
Notes
-----
        .. note:: In the future, additional tests will be implemented to check
        that the covariance matrix is symmetric and has positive variances.
Examples
--------
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array[1])
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array([[1, 2], [2, -4]]))
>>> with pytest.raises(TypeError): sandy.CategoryCov(np.array([[1, 2], [3, 4]]))
"""
return self._data
@data.setter
def data(self, data):
self._data = pd.DataFrame(data, dtype=float)
        if not (len(data.shape) == 2 and data.shape[0] == data.shape[1]):
raise TypeError("Covariance matrix must have two dimensions")
if not (np.diag(data) >= 0).all():
raise TypeError("Covariance matrix must have positive variance")
sym_limit = 10
# Round to avoid numerical fluctuations
if not (data.values.round(sym_limit) == data.values.T.round(sym_limit)).all():
raise TypeError("Covariance matrix must be symmetric")
@property
def size(self):
return self.data.values.shape[0]
def get_std(self):
"""
Extract standard deviations.
Returns
-------
`pandas.Series`
1d array of standard deviations
Examples
--------
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).get_std()
0 1.00000e+00
1 1.00000e+00
Name: STD, dtype: float64
"""
cov = self.to_sparse().diagonal()
std = np.sqrt(cov)
return pd.Series(std, index=self.data.index, name="STD")
def get_eig(self, tolerance=None):
"""
Extract eigenvalues and eigenvectors.
Parameters
----------
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
The replacement condition is implemented as:
.. math::
$$
\frac{e_i}{e_{MAX}} < tolerance
$$
Then, a `tolerance=1e-3` will replace all eigenvalues
1000 times smaller than the largest eigenvalue.
A `tolerance=0` will replace all negative eigenvalues.
Returns
-------
`Pandas.Series`
array of eigenvalues
`pandas.DataFrame`
matrix of eigenvectors
Notes
-----
.. note:: only the real part of the eigenvalues is preserved
        .. note:: the discussion associated with the implementation
of this algorithm is available [here](https://github.com/luca-fiorito-11/sandy/discussions/135)
Examples
--------
Extract eigenvalues of correlation matrix.
>>> sandy.CategoryCov([[1, 0.4], [0.4, 1]]).get_eig()[0]
0 1.40000e+00
1 6.00000e-01
Name: EIG, dtype: float64
Extract eigenvectors of correlation matrix.
>>> sandy.CategoryCov([[1, 0.4], [0.4, 1]]).get_eig()[1]
0 1
0 7.07107e-01 -7.07107e-01
1 7.07107e-01 7.07107e-01
Extract eigenvalues of covariance matrix.
>>> sandy.CategoryCov([[0.1, 0.1], [0.1, 1]]).get_eig()[0]
0 8.90228e-02
1 1.01098e+00
Name: EIG, dtype: float64
Set up a tolerance.
>>> sandy.CategoryCov([[0.1, 0.1], [0.1, 1]]).get_eig(tolerance=0.1)[0]
0 0.00000e+00
1 1.01098e+00
Name: EIG, dtype: float64
Test with negative eigenvalues.
>>> sandy.CategoryCov([[1, 2], [2, 1]]).get_eig()[0]
0 3.00000e+00
1 -1.00000e+00
Name: EIG, dtype: float64
Replace negative eigenvalues.
>>> sandy.CategoryCov([[1, 2], [2, 1]]).get_eig(tolerance=0)[0]
0 3.00000e+00
1 0.00000e+00
Name: EIG, dtype: float64
Check output size.
>>> cov = sandy.CategoryCov.random_cov(50, seed=11)
>>> assert cov.get_eig()[0].size == cov.data.shape[0] == 50
>>> sandy.CategoryCov([[1, 0.2, 0.1], [0.2, 2, 0], [0.1, 0, 3]]).get_eig()[0]
0 9.56764e-01
1 2.03815e+00
2 3.00509e+00
Name: EIG, dtype: float64
Real test on H1 file
>>> endf6 = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> ek = sandy.energy_grids.CASMO12
>>> err = endf6.get_errorr(ek_errorr=ek, err=1)
>>> cov = err.get_cov()
>>> cov.get_eig()[0].sort_values(ascending=False).head(7)
0 3.66411e-01
1 7.05311e-03
2 1.55346e-03
3 1.60175e-04
4 1.81374e-05
5 1.81078e-06
6 1.26691e-07
Name: EIG, dtype: float64
>>> assert not (cov.get_eig()[0] >= 0).all()
>>> assert (cov.get_eig(tolerance=0)[0] >= 0).all()
"""
E, V = scipy.linalg.eig(self.data)
E = pd.Series(E.real, name="EIG")
V = pd.DataFrame(V.real)
if tolerance is not None:
E[E/E.max() < tolerance] = 0
return E, V
def get_corr(self):
"""
Extract correlation matrix.
Returns
-------
        df : :obj:`CategoryCov`
correlation matrix
Examples
--------
>>> sandy.CategoryCov([[4, 2.4],[2.4, 9]]).get_corr()
0 1
0 1.00000e+00 4.00000e-01
1 4.00000e-01 1.00000e+00
"""
cov = self.data.values
with np.errstate(divide='ignore', invalid='ignore'):
coeff = np.true_divide(1, self.get_std().values)
coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN
corr = np.multiply(np.multiply(cov, coeff).T, coeff)
df = pd.DataFrame(
corr,
index=self.data.index,
columns=self.data.columns,
)
return self.__class__(df)
def invert(self, rows=None):
"""
Method for calculating the inverse matrix.
Parameters
----------
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`CategoryCov`
The inverse matrix.
Examples
--------
>>> S = sandy.CategoryCov(np.diag(np.array([1, 2, 3])))
>>> S.invert()
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
>>> S = sandy.CategoryCov(np.diag(np.array([0, 2, 3])))
>>> S.invert()
0 1 2
0 0.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
>>> S = sandy.CategoryCov(np.diag(np.array([0, 2, 3])))
>>> S.invert(rows=1)
0 1 2
0 0.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 5.00000e-01 0.00000e+00
2 0.00000e+00 0.00000e+00 3.33333e-01
"""
index = self.data.index
columns = self.data.columns
M_nonzero_idxs, M_reduce = reduce_size(self.data)
cov = sps.csc_matrix(M_reduce.values)
rows_ = cov.shape[0] if rows is None else rows
data = sparse_tables_inv(cov, rows=rows_)
M_inv = restore_size(M_nonzero_idxs, data, len(self.data))
M_inv = M_inv.reindex(index=index, columns=columns).fillna(0)
return self.__class__(M_inv)
def log2norm_cov(self, mu):
"""
Transform covariance matrix to the one of the underlying normal
distribution.
Parameters
----------
mu : iterable
The desired mean values of the target lognormal distribution.
Returns
-------
`CategoryCov` of the underlying normal covariance matrix
Examples
--------
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> cov.log2norm_cov(pd.Series(np.ones(cov.data.shape[0]), index=cov.data.index))
A B C
A 2.19722e+00 1.09861e+00 1.38629e+00
B 1.09861e+00 2.39790e+00 1.60944e+00
C 1.38629e+00 1.60944e+00 2.07944e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = pd.Series([1, 2, .5], index=["A", "B", "C"])
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = [1, 2, .5]
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = np.array([1, 2, .5])
>>> cov.log2norm_cov(mu)
A B C
A 2.19722e+00 6.93147e-01 1.94591e+00
B 6.93147e-01 1.25276e+00 1.60944e+00
C 1.94591e+00 1.60944e+00 3.36730e+00
Notes
-----
        .. note:: Reference for the equation is 10.1016/j.nima.2012.06.036
.. math::
$$
cov(lnx_i, lnx_j) = \ln\left(\frac{cov(x_i,x_j)}{<x_i>\cdot<x_j>}+1\right)
$$
"""
mu_ = np.diag(1 / pd.Series(mu))
mu_ = pd.DataFrame(mu_, index=self.data.index, columns=self.data.index)
return self.__class__(np.log(self.sandwich(mu_).data + 1))
def log2norm_mean(self, mu):
"""
        Transform mean values to the mean values of the underlying normal
distribution.
Parameters
----------
mu : iterable
The target mean values.
Returns
-------
        `pd.Series` of the underlying normal distribution mean values
Examples
--------
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = pd.Series(np.ones(cov.data.shape[0]), index=cov.data.index)
>>> cov.log2norm_mean(mu)
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> cov.log2norm_mean([1, 1, 1])
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
>>> cov = CategoryCov(pd.DataFrame([[8, 2, 3], [2, 10, 4], [3, 4, 7]], index=['A', 'B', 'C'], columns=['A', 'B', 'C']))
>>> mu = np.ones(cov.data.shape[0])
>>> cov.log2norm_mean(mu)
A -1.09861e+00
B -1.19895e+00
C -1.03972e+00
dtype: float64
Reindexing example
"""
mu_ = pd.Series(mu)
mu_.index = self.data.index
return np.log(mu_**2 / np.sqrt(np.diag(self.data) + mu_**2))
def sampling(self, nsmp, seed=None, rows=None, pdf='normal',
tolerance=None, relative=True):
"""
Extract perturbation coefficients according to chosen distribution with
covariance from given covariance matrix. See note for non-normal
distribution sampling.
The samples' mean will be 1 or 0 depending on `relative` kwarg.
Parameters
----------
nsmp : `int`
number of samples.
seed : `int`, optional, default is `None`
seed for the random number generator (by default use `numpy`
            default pseudo-random number generator).
rows : `int`, optional, default is `None`
option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
pdf : `str`, optional, default is 'normal'
random numbers distribution.
Available distributions are:
* `'normal'`
* `'uniform'`
* `'lognormal'`
tolerance : `float`, optional, default is `None`
replace all eigenvalues smaller than a given tolerance with zeros.
relative : `bool`, optional, default is `True`
flag to switch between relative and absolute covariance matrix
handling
* `True`: samples' mean will be 1
* `False`: samples' mean will be 0
Returns
-------
`sandy.Samples`
object containing samples
Notes
-----
.. note:: sampling with uniform distribution is performed on
diagonal covariance matrix, neglecting all correlations.
.. note:: sampling with lognormal distribution gives a set of samples
with mean=1 as lognormal distribution can not have mean=0.
Therefore, `relative` parameter does not apply to it.
Examples
--------
Draw 3 sets of samples using custom seed:
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(3, seed=11)
0 1
0 -7.49455e-01 -2.13159e+00
1 1.28607e+00 1.10684e+00
2 1.48457e+00 9.00879e-01
>>> sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(3, seed=11, rows=1)
0 1
0 -7.49455e-01 -2.13159e+00
1 1.28607e+00 1.10684e+00
2 1.48457e+00 9.00879e-01
>>> sample = sandy.CategoryCov([[1, 0.4],[0.4, 1]]).sampling(1000000, seed=11)
>>> sample.data.cov()
0 1
0 9.98662e-01 3.99417e-01
1 3.99417e-01 9.98156e-01
Small negative eigenvalue:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(3, seed=11, tolerance=0)
0 1
0 2.74945e+00 5.21505e+00
1 7.13927e-01 1.07147e+00
2 5.15435e-01 1.64683e+00
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, tolerance=0).data.cov()
0 1
0 9.98662e-01 -1.99822e-01
1 -1.99822e-01 2.99437e+00
Sampling with different `pdf`:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(3, seed=11, pdf='uniform', tolerance=0)
0 1
0 -1.07578e-01 2.34960e+00
1 -6.64587e-01 5.21222e-01
2 8.72585e-01 9.12563e-01
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(3, seed=11, pdf='lognormal', tolerance=0)
0 1
0 3.03419e+00 1.57919e+01
1 5.57248e-01 4.74160e-01
2 4.72366e-01 6.50840e-01
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0).data.cov()
0 1
0 1.00042e+00 -1.58806e-03
1 -1.58806e-03 3.00327e+00
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0).data.cov()
0 1
0 1.00219e+00 1.99199e-01
1 1.99199e-01 3.02605e+00
`relative` kwarg usage:
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='normal', tolerance=0, relative=True).data.mean(axis=0)
0 1.00014e+00
1 9.99350e-01
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='normal', tolerance=0, relative=False).data.mean(axis=0)
0 1.41735e-04
1 -6.49679e-04
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0, relative=True).data.mean(axis=0)
0 9.98106e-01
1 9.99284e-01
dtype: float64
>>> sandy.CategoryCov([[1, -.2],[-.2, 3]]).sampling(1000000, seed=11, pdf='uniform', tolerance=0, relative=False).data.mean(axis=0)
0 -1.89367e-03
1 -7.15929e-04
dtype: float64
        Lognormal distribution sampling is independent of the `relative` kwarg
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0, relative=True).data.mean(axis=0)
0 9.99902e-01
1 9.99284e-01
dtype: float64
>>> sandy.CategoryCov([[1, .2],[.2, 3]]).sampling(1000000, seed=11, pdf='lognormal', tolerance=0, relative=False).data.mean(axis=0)
0 9.99902e-01
1 9.99284e-01
dtype: float64
"""
dim = self.data.shape[0]
pdf_ = pdf if pdf != 'lognormal' else 'normal'
y = sample_distribution(dim, nsmp, seed=seed, pdf=pdf_) - 1
y = sps.csc_matrix(y)
# the covariance matrix to decompose is created depending on the chosen
# pdf
if pdf == 'uniform':
to_decompose = self.__class__(np.diag(np.diag(self.data)))
elif pdf == 'lognormal':
ones = np.ones(self.data.shape[0])
to_decompose = self.log2norm_cov(ones)
else:
to_decompose = self
L = sps.csr_matrix(to_decompose.get_L(rows=rows,
tolerance=tolerance))
samples = pd.DataFrame(L.dot(y).toarray(), index=self.data.index,
columns=list(range(nsmp)))
if pdf == 'lognormal':
# mean value of lognormally sampled distributions will be one by
            # default
samples = np.exp(samples.add(self.log2norm_mean(ones), axis=0))
elif relative:
samples += 1
return sandy.Samples(samples.T)
@classmethod
def from_var(cls, var):
"""
Construct the covariance matrix from the variance vector.
Parameters
----------
var : 1D iterable
Variance vector.
Returns
-------
`CategoryCov`
Object containing the covariance matrix.
Example
-------
>>> S = pd.Series(np.array([0, 2, 3]), index=pd.Index([1, 2, 3]))
>>> cov = sandy.CategoryCov.from_var(S)
>>> cov
1 2 3
1 0.00000e+00 0.00000e+00 0.00000e+00
2 0.00000e+00 2.00000e+00 0.00000e+00
3 0.00000e+00 0.00000e+00 3.00000e+00
>>> assert type(cov) is sandy.CategoryCov
>>> S = sandy.CategoryCov.from_var((1, 2, 3))
>>> S
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 2.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 3.00000e+00
>>> assert type(S) is sandy.CategoryCov
>>> assert type(sandy.CategoryCov.from_var([1, 2, 3])) is sandy.CategoryCov
"""
var_ = pd.Series(var)
cov_values = sps.diags(var_.values).toarray()
cov = pd.DataFrame(cov_values,
index=var_.index, columns=var_.index)
return cls(cov)
@classmethod
def from_stdev(cls, std):
"""
Construct the covariance matrix from the standard deviation vector.
Parameters
----------
std : `pandas.Series`
Standard deviations vector.
Returns
-------
`CategoryCov`
Object containing the covariance matrix.
Example
-------
>>> S = pd.Series(np.array([0, 2, 3]), index=pd.Index([1, 2, 3]))
>>> cov = sandy.CategoryCov.from_stdev(S)
>>> cov
1 2 3
1 0.00000e+00 0.00000e+00 0.00000e+00
2 0.00000e+00 4.00000e+00 0.00000e+00
3 0.00000e+00 0.00000e+00 9.00000e+00
>>> assert type(cov) is sandy.CategoryCov
>>> S = sandy.CategoryCov.from_stdev((1, 2, 3))
>>> S
0 1 2
0 1.00000e+00 0.00000e+00 0.00000e+00
1 0.00000e+00 4.00000e+00 0.00000e+00
2 0.00000e+00 0.00000e+00 9.00000e+00
>>> assert type(S) is sandy.CategoryCov
>>> assert type(sandy.CategoryCov.from_stdev([1, 2, 3])) is sandy.CategoryCov
"""
std_ = pd.Series(std)
var = std_ * std_
return cls.from_var(var)
@classmethod
def from_stack(cls, data_stack, index, columns, values, rows=10000000,
kind='upper'):
"""
Create a covariance matrix from a stacked dataframe.
Parameters
----------
data_stack : `pd.Dataframe`
Stacked dataframe.
index : 1D iterable, optional
Index of the final covariance matrix.
columns : 1D iterable, optional
Columns of the final covariance matrix.
values : `str`, optional
Name of the column where the values are located.
rows : `int`, optional
Number of rows to take into account into each loop. The default
is 10000000.
kind : `str`, optional
Select if the stack data represents upper or lower triangular
            matrix. The default is 'upper'.
Returns
-------
`sandy.CategoryCov`
            Covariance matrix.
Examples
--------
If the stack data represents the covariance matrix:
>>> S = pd.DataFrame(np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]]))
>>> S = S.stack().reset_index().rename(columns = {'level_0': 'dim1', 'level_1': 'dim2', 0: 'cov'})
>>> S = S[S['cov'] != 0]
>>> sandy.CategoryCov.from_stack(S, index=['dim1'], columns=['dim2'], values='cov', kind='all')
dim2 0 1 2
dim1
0 1.00000e+00 1.00000e+00 1.00000e+00
1 1.00000e+00 2.00000e+00 1.00000e+00
2 1.00000e+00 1.00000e+00 1.00000e+00
If the stack data represents only the upper triangular part of the
covariance matrix:
>>> test_1 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT", "MT", "E"], columns=["MAT1", "MT1", "E1"], values='VAL').data
>>> test_1
MAT1 9437
MT1 2 102
E1 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT MT E
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> test_2 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT", "MT", "E"], columns=["MAT1", "MT1", "E1"], values='VAL', rows=1).data
>>> test_2
MAT1 9437
MT1 2 102
E1 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT MT E
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> assert (test_1 == test_2).all().all()
If the stack data represents only the lower triangular part of the
covariance matrix:
>>> test_1 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT1", "MT1", "E1"], columns=["MAT", "MT", "E"], values='VAL', kind="lower").data
>>> test_1
MAT 9437
MT 2 102
E 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT1 MT1 E1
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> test_2 = sandy.CategoryCov.from_stack(minimal_covtest, index=["MAT1", "MT1", "E1"], columns=["MAT", "MT", "E"], values='VAL', kind="lower", rows=1).data
>>> test_2
MAT 9437
MT 2 102
E 1.00000e-02 2.00000e+05 1.00000e-02 2.00000e+05
MAT1 MT1 E1
9437 2 1.00000e-02 2.00000e-02 0.00000e+00 4.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 9.00000e-02 0.00000e+00 5.00000e-02
102 1.00000e-02 4.00000e-02 0.00000e+00 1.00000e-02 0.00000e+00
2.00000e+05 0.00000e+00 5.00000e-02 0.00000e+00 1.00000e-02
>>> assert (test_1 == test_2).all().all()
"""
cov = segmented_pivot_table(data_stack, rows=rows, index=index,
columns=columns, values=values)
if kind == 'all':
return cls(cov)
else:
return triu_matrix(cov, kind=kind)
def _gls_Vy_calc(self, S, rows=None):
"""
2D calculated output using
.. math::
$$
S\cdot V_{x_{prior}}\cdot S.T
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`pd.DataFrame`
Covariance matrix `Vy_calc` calculated using
S.dot(Vx_prior).dot(S.T)
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> cov._gls_Vy_calc(S)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
>>> cov._gls_Vy_calc(S, rows=1)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
"""
index = pd.DataFrame(S).index
S_ = pd.DataFrame(S).values
rows_ = S_.shape[0] if rows is None else rows
Vy_calc = sparse_tables_dot_multiple([S_, self.data.values,
S_.T], rows=rows_)
return pd.DataFrame(Vy_calc, index=index, columns=index)
def _gls_G(self, S, Vy_extra=None, rows=None):
"""
2D calculated output using
.. math::
$$
S\cdot V_{x_{prior}}\cdot S.T + V_{y_{extra}}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
Vy_extra : 2D iterable, optional.
2D covariance matrix for y_extra (MXM).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`pd.DataFrame`
Covariance matrix `G` calculated using
S.dot(Vx_prior).dot(S.T) + Vy_extra
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov._gls_G(S, Vy)
0 1
0 6.00000e+00 1.10000e+01
1 1.10000e+01 2.60000e+01
>>> cov._gls_G(S, Vy, rows=1)
0 1
0 6.00000e+00 1.10000e+01
1 1.10000e+01 2.60000e+01
>>> cov._gls_G(S)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
>>> cov._gls_G(S, rows=1)
0 1
0 5.00000e+00 1.10000e+01
1 1.10000e+01 2.50000e+01
"""
# GLS_sensitivity:
Vy_calc = self._gls_Vy_calc(S, rows=rows)
if Vy_extra is not None:
            # Put the data in an appropriate format
Vy_extra_ = sandy.CategoryCov(Vy_extra).data
index = pd.DataFrame(Vy_extra).index
Vy_extra_ = Vy_extra_.values
Vy_calc = Vy_calc.reindex(index=index, columns=index).fillna(0).values
# Calculations:
Vy_calc = sps.csr_matrix(Vy_calc)
Vy_extra_ = sps.csr_matrix(Vy_extra_)
# G calculation
G = Vy_calc + Vy_extra_
G = pd.DataFrame(G.toarray(), index=index, columns=index)
else:
G = Vy_calc
return G
def _gls_G_inv(self, S, Vy_extra=None, rows=None):
"""
2D calculated output using
.. math::
$$
\left(S\cdot V_{x_{prior}}\cdot S.T + V_{y_{extra}}\right)^{-1}
$$
Parameters
----------
S : 2D iterable
Sensitivity matrix (MXN).
Vy_extra : 2D iterable, optional
2D covariance matrix for y_extra (MXM).
rows : `int`, optional
Option to use row calculation for matrix calculations. This option
defines the number of lines to be taken into account in each loop.
The default is None.
Returns
-------
`pd.DataFrame`
Covariance matrix `G_inv` calculated using
(S.dot(Vx_prior).dot(S.T) + Vy_extra)^-1
Example
-------
>>> S = np.array([[1, 2], [3, 4]])
>>> cov = sandy.CategoryCov.from_var([1, 1])
>>> Vy = np.diag(pd.Series([1, 1]))
>>> cov._gls_G_inv(S, Vy)
0 1
0 7.42857e-01 -3.14286e-01
1 -3.14286e-01 1.71429e-01
>>> cov._gls_G_inv(S, Vy, rows=1)
0 1
0 7.42857e-01 -3.14286e-01
1 -3.14286e-01 1.71429e-01
>>> cov._gls_G_inv(S)
0 1
0 6.25000e+00 -2.75000e+00
1 -2.75000e+00 1.25000e+00
>>> cov._gls_G_inv(S, rows=1)
0 1
0 6.25000e+00 -2.75000e+00
1 -2.75000e+00 1.25000e+00
"""
if Vy_extra is not None:
            index = pd.DataFrame(Vy_extra).index
import glob
import os
import sys
from pprint import pprint
import pandas as pd
from ..constants import (DATA_DIR, DTYPES, RAW_DATA_DIR, USE_VAR_LIST,
USE_VAR_LIST_DICT, USE_VAR_LIST_DICT_REVERSE)
from ..download.nppes import nppes_month_list
from ..utils.utils import coerce_dtypes, month_name_to_month_num
def get_filepaths_from_dissemination_zips(folder):
'''
Each dissemination folder contains a large / bulk data file of the format
npidata_20050523-yearmonthday.csv, sometimes
deep in a subdirectory. This identifies the likeliest candidate and maps
in a dictionary to the main zip folder
'''
zip_paths = os.path.join(folder, 'NPPES_Data_Dissemination*')
stub = os.path.join(folder, 'NPPES_Data_Dissemination_')
folders = [x for x
in glob.glob(zip_paths)
if not x.endswith('.zip')]
folders = [x for x in folders if 'Weekly' not in x]
possbl = list(set(glob.glob(zip_paths + '/**/*npidata_*', recursive=True)))
possbl = [x for x in possbl if 'Weekly' not in x]
paths = {(x.partition(stub)[2].split('/')[0].split('_')[1],
str(month_name_to_month_num(
x.partition(stub)[2].split('/')[0].split('_')[0]))): x
for x in possbl if 'eader' not in x}
assert len(folders) == len(paths)
return paths
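# Illustrative sketch of the (year, month) key construction used above, on a
# made-up path. month_name_to_month_num is assumed to map an English month name to
# its number; a plain dict stands in for it here.
def _example_dissemination_key():
    stub = '/data/NPPES_Data_Dissemination_'
    path = '/data/NPPES_Data_Dissemination_June_2019/npidata_20050523-20190609.csv'
    month_num = {'June': 6}
    folder_part = path.partition(stub)[2].split('/')[0]  # 'June_2019'
    key = (folder_part.split('_')[1], str(month_num[folder_part.split('_')[0]]))
    # key == ('2019', '6'), matching the dictionary produced above.
    return key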
def get_weekly_dissemination_zips(folder):
'''
Each weekly update folder contains a large / bulk data file of the format
npidata_pfile_20200323-20200329, representing the week covered
Will need to later add functionality for weekly updates for ploc2 files
'''
zip_paths = os.path.join(folder, 'NPPES_Data_Dissemination*')
stub = os.path.join(folder, 'NPPES_Data_Dissemination_')
folders = [x for x
in glob.glob(zip_paths)
if not x.endswith('.zip')]
folders = [x for x in folders if 'Weekly' in x]
possbl = list(set(glob.glob(zip_paths + '/**/*npidata_*', recursive=True)))
possbl = [x for x in possbl if 'Weekly' in x]
paths = {(x.partition(stub)[2].split('/')[0].split('_')[0],
x.partition(stub)[2].split('/')[0].split('_')[1]): x
for x in possbl if 'eader' not in x}
assert len(folders) == len(paths)
return paths
def which_weekly_dissemination_zips_are_updates(folder):
"""
Will need to later add functionality for weekly updates for ploc2 files
"""
last_monthly = max([pd.to_datetime(val.split('-')[1]
.split('.csv')[0]
.replace(' Jan 2013/', '')
.replace('npidata_', ''))
for key, val in
get_filepaths_from_dissemination_zips(folder).items()])
updates = [(x, val) for x, val
in get_weekly_dissemination_zips(folder).items()
if pd.to_datetime(x[1]) > last_monthly]
return updates
def get_secondary_loc_filepaths_from_dissemination_zips(folder):
zip_paths = os.path.join(folder, 'NPPES_Data_Dissemination*')
stub = os.path.join(folder, 'NPPES_Data_Dissemination_')
possbl = list(set(glob.glob(zip_paths + '/**/pl_pfile_*', recursive=True)))
possbl = [x for x in possbl if 'Weekly' not in x]
paths = {(x.partition(stub)[2].split('/')[0].split('_')[1],
str(month_name_to_month_num(
x.partition(stub)[2].split('/')[0].split('_')[0]))): x
for x in possbl if 'eader' not in x}
return paths
def get_filepaths_from_single_variable_files(variable, folder, noisily=True):
'''
Returns a dictionary of the path to each single variable file, for
each month and year
'''
files = glob.glob(os.path.join(folder, '%s*' % variable))
file_dict = {(x.split(variable)[1].split('.')[0][:4],
x.split(variable)[1].split('.')[0][4:]): x
for x in files}
if noisily:
print('For variable %s, there are %s files:'
% (variable, len(file_dict)))
pprint(sorted(list(file_dict.keys())))
return file_dict
def convert_dtypes(df):
'''
Note: should move to generic version found in utils
'''
# weird fix required for bug in select_dtypes
ints_init = (df.dtypes
.reset_index()[df.dtypes.reset_index()[0] == int]['index']
.values.tolist())
current_dtypes = {x: 'int' for x in ints_init}
for t in ['int', 'object', ['float32', 'float64'], 'datetime', 'string']:
current_dtypes.update({x: t for x in df.select_dtypes(t).columns})
dissem_file = (not set(current_dtypes.keys()).issubset(DTYPES.keys()))
for col in df.columns:
final_dtype = (DTYPES[col] if not dissem_file
else DTYPES[{**USE_VAR_LIST_DICT_REVERSE,
**{'seq': 'seq'}}[col]])
if (current_dtypes[col] != final_dtype and
final_dtype not in current_dtypes[col]):
try:
df = df.assign(**{col: coerce_dtypes(df[col],
current_dtypes[col],
final_dtype)})
            except ValueError as err:
                if final_dtype == 'string':
                    newcol = coerce_dtypes(df[col], current_dtypes[col], 'str')
                    newcol = coerce_dtypes(newcol, 'str', 'string')
                    df = df.assign(**{col: newcol})
                else:
                    raise ValueError("{0}".format(err))
return df
def column_details(variable, dissem_file, dta_file):
'''
Generates column list to get from the raw data; dissem files
have long string names and are wide, whereas NBER files have
short names and are long
'''
diss_var = USE_VAR_LIST_DICT[variable]
multi = True if isinstance(diss_var, list) else False
tvar = ['npi', 'seq']
if not dissem_file:
if multi:
if str.isupper(variable) and not dta_file:
def collist(col): return col.upper() == variable or col in tvar
elif str.isupper(variable) and dta_file:
collist = tvar + [variable.lower()]
else:
collist = tvar + [variable]
else:
collist = ['npi', variable]
d_use = {} if not variable == 'ploczip' else {'ploczip': str}
else:
diss_vars = diss_var if multi else [diss_var]
collist = (['NPI'] + diss_var if multi else ['NPI'] + [diss_var])
d_use = {x: object for x in diss_vars if DTYPES[variable] == 'string'}
return collist, d_use
def locate_file(folder, year, month, variable):
'''
'''
paths1 = get_filepaths_from_single_variable_files(variable, folder, False)
if not variable.startswith('ploc2'):
paths2 = get_filepaths_from_dissemination_zips(folder)
else:
paths2 = get_secondary_loc_filepaths_from_dissemination_zips(folder)
try:
return paths1[(year, month)]
except KeyError:
try:
return paths2[(year, month)]
except KeyError:
return None
def read_and_process_df(folder, year, month, variable):
'''
Locates and reads in year-month-variable df from disk,
checks and converts dtypes, makes consistent variable names,
and adds a standardized month column
'''
file_path = locate_file(folder, '%s' % year, '%s' % month, variable)
if file_path:
df = process_filepath_to_df(file_path, variable)
df['month'] = pd.to_datetime('%s-%s' % (year, month))
return df
def read_and_process_weekly_updates(folder, variable):
"""
"""
filepaths = which_weekly_dissemination_zips_are_updates(folder)
if filepaths:
updates = pd.concat(
[process_filepath_to_df(f[1], variable).assign(
week=pd.to_datetime(f[0][0]))
for f in filepaths])
updates['month'] = (pd.to_datetime(updates.week.dt.year.astype(str)
+ '-'
+ updates.week.dt.month.astype(str) + '-' + '1'))
updates = (updates.dropna()
.groupby(['npi', 'month'])
.max()
.reset_index()
.merge(updates)
.drop(columns='week'))
return updates
def process_filepath_to_df(file_path, variable):
"""
"""
is_dissem_file = len(file_path.split('/')) > 6
is_dta_file = os.path.splitext(file_path)[1] == '.dta'
is_pl_file = ('pl_pfile_' in file_path) and is_dissem_file
collist, d_use = column_details(variable, is_dissem_file, is_dta_file)
df = (pd.read_csv(file_path, usecols=collist, dtype=d_use)
if file_path.endswith('.csv')
else pd.read_stata(file_path, columns=collist))
if is_pl_file:
df = (pd.concat([df, df.groupby('NPI').cumcount() + 1], axis=1)
.rename(columns={0: 'seq'}))
if (not is_dissem_file
and variable not in df.columns
and variable.lower() in df.columns):
df = df.rename(columns={variable.lower(): variable})
df = convert_dtypes(df)
df = reformat(df, variable, is_dissem_file)
return df
def reformat(df, variable, is_dissem_file):
'''
'''
multi = True if isinstance(USE_VAR_LIST_DICT[variable], list) else False
if is_dissem_file and multi:
stb = list(set([x.split('_')[0] for x in USE_VAR_LIST_DICT[variable]]))
assert len(stb) == 1
stb = stb[0] + '_'
df = pd.wide_to_long(df, [stb], i="NPI", j="seq").dropna()
df = df.reset_index().rename(columns={'NPI': 'npi', stb: variable})
elif is_dissem_file:
df = df.rename(columns={x: {**USE_VAR_LIST_DICT_REVERSE,
**{'seq': 'seq'}}[x]
for x in df.columns})
return df
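# Illustrative sketch of the pd.wide_to_long reshape applied to dissemination files
# in reformat(): wide columns sharing a stub (the 'ptaxcode_' stub below is made up)
# become a long frame indexed by (npi, seq).
def _example_wide_to_long():
    df = pd.DataFrame({'NPI': [1, 2],
                       'ptaxcode_1': ['A', 'B'],
                       'ptaxcode_2': ['C', None]})
    long_df = pd.wide_to_long(df, ['ptaxcode_'], i='NPI', j='seq').dropna()
    long_df = long_df.reset_index().rename(columns={'NPI': 'npi', 'ptaxcode_': 'ptaxcode'})
    # Result: one row per (npi, seq) pair with a single 'ptaxcode' column.
    return long_df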
def process_variable(folder, variable, searchlist, final_weekly_updates=True):
'''
'''
# searchlist = [x for x in searchlist if x != (2011, 3)]
df_list = []
for (year, month) in searchlist:
print(year, month)
if variable == "PTAXGROUP":
try:
df = read_and_process_df(folder, year, month, variable)
except ValueError as err:
assert year < 2012
else:
df = read_and_process_df(folder, year, month, variable)
df_list.append(df)
df = pd.concat(df_list, axis=0) if df_list else None
if df_list and final_weekly_updates:
u = read_and_process_weekly_updates(folder, variable)
if isinstance(u, pd.DataFrame):
df = df.merge(u, on=['npi', 'month'], how='outer', indicator=True)
if (df._merge == "right_only").sum() != 0:
df.loc[df._merge == "right_only",
'%s_x' % variable] = df['%s_y' % variable]
if (df._merge == "both").sum() != 0:
df.loc[df._merge == "both",
'%s_x' % variable] = df['%s_y' % variable]
df = (df.drop(columns=['_merge', '%s_y' % variable])
.rename(columns={'%s_x' % variable: variable}))
assert (df[['npi', 'month']].drop_duplicates().shape[0]
== df.shape[0])
return df
def sanitize_csv_for_update(df, variable):
    df['month'] = pd.to_datetime(df.month)
import json
from datetime import datetime
import numpy as np
import pandas as pd
from joblib import dump, load
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from os import mkdir
class kickstarter_predictor():
def __init__(self) -> None:
self._RSEED=42
self._json_cols=['category', 'location']
self._cat_features_impute = ['country', 'currency', 'category_name', 'location_type']
self._cat_features_onehot = ['country', 'currency', 'category_name', 'location_type']
self.preprocessor = ColumnTransformer(
transformers=[
#('cat_impute', SimpleImputer(strategy='constant', fill_value='missing'), self._cat_features_impute),
('cat_onehot', OneHotEncoder(handle_unknown='ignore'), self._cat_features_onehot),
('untouched', 'passthrough', ['duration','goal_usd', 'launched_at_month', 'created_at_month'])
#('untouched', 'passthrough', ['deadline','static_usd_rate', 'goal', 'launched_at', 'created_at'])
],
sparse_threshold=0
)
self.model = RandomForestClassifier(n_estimators=120, random_state=self._RSEED, max_features = 'sqrt', n_jobs=-1, verbose = 1)
try:
mkdir('./output')
except OSError:
print ("Creation of the directory output failed.")
def expand_json_cols(self, df):
"""
Expand columns that contain json objects
Parameters
---------
df: Pandas DataFrame
Returns
--------
df: Pandas DataFrame
"""
df_dicts = pd.DataFrame()
print('---------- Parsing json ------------')
for col in self._json_cols:
print('Parsing json: '+col)
c = []
for i, val in df[col].items():
try:
c.append(json.loads(val))
except:
c.append(dict())
df_dicts[col] = pd.Series(np.array(c))
print('---------- Expanding dictionaries --------')
df_expanded = []
for col in df_dicts.columns:
print('Expanding: '+col)
df_expanded.append(pd.json_normalize(df_dicts[col]).add_prefix(col+'_'))
df = pd.concat([df.drop(self._json_cols, axis=1), pd.concat(df_expanded, axis=1)], axis=1)
return df
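    def _json_normalize_example(self):
        """
        Hedged sketch, not used by the pipeline: shows how pd.json_normalize
        plus add_prefix flattens one parsed json column, mirroring expand_json_cols.
        """
        parsed = [{"id": 14, "name": "Music"}, {}]
        return pd.json_normalize(parsed).add_prefix("category_")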
def data_cleaning(self, df):
"""
Filter data frame by relevant columns and rows.
Parameters
---------
df: Pandas DataFrame
Returns
--------
df: Pandas DataFrame
"""
self.base_features = ['country', 'currency', 'category_name', 'location_type', 'goal',
'launched_at', 'created_at', 'blurb', 'state', 'deadline', 'static_usd_rate']
df = df[self.base_features]
#df.dropna(inplace=True)
df = df.query("state == 'successful' or state == 'failed'")
dic = {'successful' : 1, 'failed' : 0}
df['state'] = df['state'].map(dic)
return df
def feature_engineering(self, df):
"""
Add custom features
Parameters
---------
df: Pandas DataFrame
Returns
--------
df: Pandas DataFrame
"""
df['duration'] = (df.deadline-df.launched_at)/(3600*24)
df['duration'] = df['duration'].round(2)
df.drop(['deadline'], axis=1, inplace=True)
df['goal_usd'] = df['goal'] * df['static_usd_rate']
df['goal_usd'] = df['goal_usd'].round(2)
df.drop(['static_usd_rate', 'goal'], axis=1, inplace=True)
df['launched_at_full'] = pd.to_datetime(df['launched_at'], unit='s')
df['launched_at_month'] = pd.DatetimeIndex(df['launched_at_full']).month
df.drop(['launched_at', 'launched_at_full'], axis=1, inplace=True)
df['created_at_full'] = pd.to_datetime(df['created_at'], unit='s')
df['created_at_month'] =
|
pd.DatetimeIndex(df['created_at_full'])
|
pandas.DatetimeIndex
|
import copy
import datetime
from datetime import datetime, timedelta
import math
import re
import numpy as np
import pandas as pd
from PIL import Image
import plotly.express as px
from plotly.subplots import make_subplots
import streamlit as st
from streamlit import markdown as md
from streamlit import caching
import gsheet
LOCAL = False
def is_unique(s):
a = s.to_numpy() # s.values (pandas<0.24)
return (a[0] == a).all()
def st_config():
"""Configure Streamlit view option and read in credential file if needed check if user and password are correct"""
st.set_page_config(layout="wide")
pw = st.sidebar.text_input("Enter password:")
if pw == st.secrets["PASSWORD"]:
return st.secrets["GSHEETS_KEY"]
else:
return None
@st.cache
def read_data(creds,ws,gs):
"""Read court tracking data in and drop duplicate case numb
ers"""
# try:
df = gsheet.read_data(gsheet.open_sheet(gsheet.init_sheets(creds),ws,gs))
# df.drop_duplicates("Case Number",inplace=True) #Do we want to drop duplicates???
return df
# except Exception as e:
# st.write(e)
# return None
def date_options(min_date,max_date,key):
quick_date_input = st.selectbox("Date Input",["Custom Date Range","Previous Week","Previous 2 Weeks","Previous Month (4 weeks)"],0,key=key)
if quick_date_input == "Previous Week":
start_date = (
datetime.today() - timedelta(weeks=1)
).date()
end_date = datetime.today().date()
if quick_date_input == "Previous 2 Weeks":
start_date = (
datetime.today() - timedelta(weeks=2)
).date()
end_date = datetime.today().date()
if quick_date_input == "Previous Month (4 weeks)":
start_date = (
datetime.today() - timedelta(weeks=4)
).date()
end_date = datetime.today().date()
if quick_date_input == "Custom Date Range":
key1 = key + "a"
key2 = key + "b"
cols = st.beta_columns(2)
start_date = cols[0].date_input("Start Date",min_value=min_date,max_value=max_date,value=min_date,key=key1)#,format="MM/DD/YY")
end_date = cols[1].date_input("End Date",min_value=min_date,max_value=max_date,value=datetime.today().date(),key=key2)#,format="MM/DD/YY")
return start_date,end_date
def filter_dates(df,start_date,end_date,col):
    df = df.loc[
        (df[col] >= start_date) &
        (df[col] <= end_date)
    ]
return df
def agg_cases(df,col,i):
df_r = df.groupby([col,"Case Number"]).count().iloc[:,i]
df_r.name = "count"
df_r = pd.DataFrame(df_r)
df_a = pd.DataFrame(df_r.to_records())
df_r = df_r.groupby(level=0).sum()
df_r["cases"] = df_a.groupby(col)["Case Number"].agg(lambda x: ','.join(x))
return df_r
def agg_checklist(df_r):
df_r["result"]=df_r.index
df_b = pd.concat([pd.Series(row['count'], row['result'].split(', ')) for _,row in df_r.iterrows()]).reset_index().groupby("index").sum()
df_a = pd.concat([pd.Series(row['cases'], row['result'].split(', ')) for _,row in df_r.iterrows()]).reset_index().groupby("index").agg(lambda x: ", ".join(x))
df_r = df_b.merge(df_a,right_index=True,left_index=True)
return df_r
def convert(x):
try:
return x.date()
except:
return None
def convert_date(df,col):
"""Helper function to convert a col to a date"""
    df[col] = pd.to_datetime(df[col]).apply(convert)
#convert NaTs to None
df[col] = (
df[col]
.astype(object)
.where(df[col].notnull(), None)
)
return df
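def _nat_to_none_example():
    # Hedged sketch of the NaT-to-None conversion done in convert_date():
    # after casting to object dtype, missing timestamps become None so that
    # Streamlit tables show blanks instead of "NaT".
    s = pd.to_datetime(pd.Series(["2021-03-01", None]))
    return s.astype(object).where(s.notnull(), None)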
def clean_df(df):
"""clean data and convert types for display"""
df.fillna("",inplace=True)
df = df.astype(str)
return df
def court_tracking_data(df,df_s,df_e):
with st.beta_expander("Court Tracking Data"):
#set up UI date filter element
try:
min_date = df["court_date"].min()-timedelta(days=7)
except: #happens when nulls are in court_date column
min_date = df["court_date"].iloc[0]
max_date = datetime.today().date()+timedelta(days=90)
start_date,end_date = date_options(
min_date,max_date,"1"
)
#Filter data by date
df_f = filter_dates(
df,
start_date,
end_date,
"court_date"
)
df_ef = filter_dates(
df_e,
start_date,
end_date,
"date_filed"
)
        #get rid of motion hearings now that we have their stats finished
df_fe = df_f[df_f['motion_hearing']!='Motion Hearing']
#Court tracker volunteer stats
cols = st.beta_columns(2)
if cols[0].checkbox("Volunteer Data (click to expand)"):
cols1 = st.beta_columns(2)
cols1[0].markdown("## Volunteer Tracker Data")
cols1[1].markdown("## ")
cols1[0].markdown(f"### :eyes: Number of trackers:\
{len(df_f['Tracker Name'].unique())}")
cols1[0].write(
df_f
.groupby('Tracker Name')['Case Number']
.nunique()
.sort_values(ascending=False)
)
if cols[1].checkbox("Motion Hearing Data (click to expand)"):
motion_hearings(df_f)
#judge data
if cols[0].checkbox("Judge Tracked Data (click to expand)"):
judge_data(df_fe,df_ef)
#Technical problems
if cols[1].checkbox("Technical Difficulties (click to expand)"):
tech_probs(df_fe)
#pie chart data
if cols[0].checkbox("Pie charts (click to expand)"):
pie_chart_build(df_fe)
#all qualitative data
if cols[1].checkbox("All Qualitative Data (click to expand)"):
render_all_qual_data(df_fe)
def pie_chart_build(df_fe):
cols = st.beta_columns(2)
cols[0].markdown("## Pie Charts for Selected Responses")
cols[1].markdown("## ")
#pie chart columns
pie_chart_cols = [
"Final Case Status",
"RRT Referal",
"Appeals Discussed",
# "NPE Discussion",
"NTV Discussed",
"Tenant Representation",
"Plaintiff Representation",
"Poor Conditions Discussed?",
"Tenant Type",
"Eviction Reason",
"Owner or Property Manager Race",
"Tenant Race",
"Property Type", #also include property subsidy?
"Defendant Language",
"Interpretation Provided",
"Digital Divide Issues",
]
pie_chart_qcols = [
["Other Final Status","Dismissal Reason","Other Dismissal Reason","Abated Reason","Other Abated Reason"],
["RRT Details"],
["Appeals Details"],
# ["NPE Date","NPE Comments"],
["NTV Details","NTV Date","NTV Communicated By","Other NTV Communication"],
["Tenants Name","Tenant Attorney","Other Tenant Representation"],
["Owner or Property Manager Name","Attorney Details","Nationwide Details","Other Plaintiff Representative Details","Plaintiff Details"],
["Poor Condition Details"],
["Other Tenancy Details"],
["Breach of Lease","Other Breach of Lease"],
None,
["Other Tenant Race"],
["Property Name","Property Address","Property Managament","Property Details","Unit Size"],
None,
["Langauage Access Comments"],
["Digital Divide Details"]
]
for col,qcols in zip(pie_chart_cols,pie_chart_qcols):
pie_chart(
df_fe,
col,
cols,#display columns
qcols
)
def motion_hearings(df_f):
cols = st.beta_columns(2)
cols[0].markdown("## Motion Hearing Stats/Qualitative Data")
cols[1].markdown("## ")
df = df_f[df_f['motion_hearing']=='Motion Hearing']
cols[0].markdown(f"### Total number of motion hearings: {df['Case Number'].nunique()}")
qual_cols = ["Plaintiff Details","Defendant Details","RRT Referal","RRT Details","Misc Details"]
render_qual_pie(df,cols,qual_cols)
def judge_data(df_f,df_ef):
display_cols = st.beta_columns(2)
display_cols[0].markdown("## Tacked and Filed Case Counts")
display_cols[1].markdown("## ")
#cases tracked by jp and cases filed
df_f["jp"] = df_f["Case Number"].str[1:2]
df_fjp = pd.DataFrame(df_f
.groupby('jp')['Case Number']
.nunique()
# .sort_values(ascending=False)
)
df_ef_jp = pd.DataFrame(df_ef
.groupby('precinct')['case_number']
.nunique()
# .sort_values(ascending=False)
)
df = pd.DataFrame()
for i in range(1,11):
if i % 2 == 1 :
idx = str(int(math.ceil(i/2)))
df.at[i,"case_type"] = f"JP{idx} Cases Tracked"
df.at[i,"Case Count"] = df_fjp.loc[idx,"Case Number"]
else:
idx = str(int(i/2))
df.at[i,"case_type"] = f"JP{idx} Cases Filed "
df.at[i,"Case Count"] = df_ef_jp.loc[idx,"case_number"]
fig = px.bar(df, x='case_type', y='Case Count')
display_cols[0].markdown("### Cases tracked and Filed by JP")
display_cols[0].plotly_chart(fig,use_container_width=True)
display_cols[0].write(df)
#cases tracked by judge
df_fj = (df_f
.groupby('Judge Name')['Case Number']
.nunique()
.sort_values(ascending=False))
fig = px.bar(df_fj,x=df_fj.index,y='Case Number')
display_cols[1].markdown("### Cases tracked by judge")
display_cols[1].plotly_chart(fig,use_container_width=True)
display_cols[1].write(df_fj)
def tech_probs(df_f):
display_col = st.beta_columns(2)
display_col[0].markdown("## Court Technical Difficulties")
display_col[1].markdown("## ")
#technical problems vs cases we watched by jp (technical problems) filter by date (note improvement)
#only care about cases with tech probs
df_f["jp"] = df_f["Case Number"].str[:2]
df = df_f.loc[
(df_f["Technical Problems?"]!="No technical issues") &
(df_f["Technical Problems?"]!="")
]
df_t = (df
.groupby('jp')['Case Number']
.nunique()
)
fig = px.bar(df_t,x=df_t.index,y='Case Number')
display_col[0].markdown("### Court Tech problems by JP")
display_col[0].plotly_chart(fig,use_container_width=True)
#Percentage of cases with problem table by jp
df_tot = (df_f
.groupby('jp')['Case Number']
.nunique()
)
df_tot = df_t.to_frame().merge(df_tot.to_frame(),right_index=True,left_index=True)
df_tot.columns = ["Cases With Tech Probs","Total Tracked Cases"]
df_tot["Percentage"] = round(df_tot["Cases With Tech Probs"]/df_tot["Total Tracked Cases"],2)*100
display_col[0].write(df_tot)
#technical narrative box with all qualitative data
display = [
"<NAME>",
# "Technical Problems?",
"Other technical problems"
]
df = df_f[display]
df = df.groupby("<NAME>").agg(lambda x: ' / '.join(x))
# df["Technical Problems?"] = df["Technical Problems?"].apply(
# lambda x: re.sub(',+', ' ',x)
# )
df["Other technical problems"] = df["Other technical problems"].apply(
lambda x: re.sub('( / )+', ' / ',x)
)
display_col[1].markdown(f"### Qualitative Data")
for idx,row in df.iterrows():
text = ""
for i,col in enumerate(df.columns):
if row[col] != "":
text += row[col] + ", "
display_col[1].markdown(f"**{idx}** {text}")
def judge_data_filings(df_ef):
display_col = st.beta_columns(2)
display_col[0].markdown("## Filings Data")
display_col[1].markdown("## ")
#cases filed by judge
df_ef['precinct'] = 'JP'+df_ef['precinct']
df_efjp = (df_ef
.groupby('precinct')['case_number']
.nunique()
# .sort_values(ascending=False)
)
fig = px.bar(
df_efjp,
x=df_efjp.index,
y='case_number'
)
display_col[0].markdown("### Cases filed by judge")
display_col[0].plotly_chart(fig,use_container_width=True)
def pie_chart(df,col,display,qualitative_data_cols=None):
display[0].markdown(f"### {col} Total Unanswered: {df[df[col]=='']['Case Number'].nunique()+df[df[col]=='Unknown']['Case Number'].nunique()}/{df['Case Number'].nunique()}")
df = df[df[col]!='']
df_pie = df.groupby(col).count()["Case Number"]
df_pie = pd.DataFrame(df_pie)
fig = px.pie(
df_pie,
values="Case Number",
names=df_pie.index,
)
display[0].plotly_chart(fig)
#render qualitative data if passed
if qualitative_data_cols:
qdata_cols_final = []
for qcol in qualitative_data_cols:
if display[0].checkbox(f"See {qcol}"):
qdata_cols_final.append(qcol)
render_qual_pie(df,display,qdata_cols_final)
else:
display[0].write("No qualitative data to display")
def render_qual_pie(df,display,qual_cols):
df.reset_index(inplace=True)
    #include defendant and case number
qual_cols.append('Case Details')
qual_cols.append('Case Number')
df = df[qual_cols]
df.replace("Unknown","",inplace=True)
for col in df.columns:
if not((col == "Case Details") or (col == "Case Number")):
display[1].markdown(f"### {col}")
for i,entry in enumerate(df[col]):
if entry != "":
display[1].markdown(f"**{df.at[i,'Case Details']}/{df.at[i,'Case Number']}:** {entry}")
def render_all_qual_data(df):
display = st.beta_columns(2)
display[0].markdown("## All Qualitative Data")
display[1].markdown("## ")
cols = [
"Late Reason",
"Other technical problems",
"Other Final Status",
"Dismissal Reason",
"Other Dismissal Reason",
"Abated Reason",
"Other Abated Reason",
"Postponed Date",
"Fee Details",
"Attorney Details",
"Nationwide Details",
"Other Plaintiff Representative Details",
"Plaintiff Details",
"Defendant Details",
"Langauage Access Comments",
"Disability Accomodations Details",
"Digital Divide Details",
"Property Name",
"Property Address",
"Property Managament",
"Property Details",
"COVID Details",
"Poor Condition Details",
"Details About Documents and Evidence Shared with Tenant",
"Other Tenancy Details",
"Late Fees/ Other Arrears",
"Tenant Dispute Amount",
"NTV Details",
"Other NTV Communication",
"CDC Details",
"NPE Comments",
"Appeals Details",
"RRT Details",
"Misc Details",
"Other Breach of Lease",
"Plaintiff Attorney",
"Nationwide Name",
"Other Plaintiff Representation",
"Tenant Attorney",
"Other Tenant Representation",
]
df.reset_index(inplace=True)
    #include defendant and case number
cols.append('Case Details')
cols.append('Case Number')
df = df[cols]
df.replace("Unknown","",inplace=True)
for col in cols:
if not((col == "Case Details") or (col == "Case Number")):
if display[0].checkbox(f"Qualitative data for {col} (click to expand)"):
display[1].markdown(f"### {col}")
for i,entry in enumerate(df[col]):
if entry != "":
display[1].markdown(f"**{df.at[i,'Case Details']}/{df.at[i,'Case Number']}:** {entry}")
def setting_data(df_s):
#(settings now to ~90 days out)
container = st.beta_container()
cols_container = container.beta_columns(2)
cols = st.beta_columns(2)
days = cols[0].slider(
"Days out?",
0,
90,
90
)
df_sf = filter_dates(
df_s,
datetime.today().date(),
(datetime.today()+timedelta(days=days)).date(),
"setting_date"
)
cols_container[0].markdown(f"### :calendar: Number of Settings \
today-{days} days out: {len(df_sf)}")
df_sf.index = df_sf["case_number"]
cols[0].write(
df_sf[["setting_date","setting_time"]]
)
def judgement_data(dfj):
display = st.beta_columns(2)
display[0].markdown("## Case Outcomes")
display[1].markdown("## ")
    #possession and monetary judgement by jp
#convert to numeric for amounts
dfj["amount_awarded"] = pd.to_numeric(dfj["amount_awarded"])
dfj["poss_awarded"] = dfj["comments"].str.contains("POSS")
    #we want to plot data for each precinct on how much was awarded to plaintiffs and how many possessions
#build df for graph
df_graph = pd.DataFrame()
for i in range(1,6):
        #possession breakdowns
df_graph.at[i,"Possesion Awarded"] = len(dfj.loc[dfj["poss_awarded"]].loc[dfj["precinct"]==str(i)]) #this is not accurate
#amount breakdowns
df_graph.at[i,"Amount Awarded"] = float(dfj.loc[(dfj["precinct"]==str(i)) & (dfj["judgement_for"]=="PLAINTIFF")]["amount_awarded"].sum())
#judgement breakdowns
df_graph.at[i,"Judgment For Plaintiff"] = len(dfj.loc[(dfj["judgement_for"] == "PLAINTIFF") & (dfj["precinct"]==str(i))])
df_graph.at[i,"Judgment For Defendant"] = len(dfj.loc[(dfj["judgement_for"] == "DEFENDANT") & (dfj["precinct"]==str(i))])
df_graph.at[i,"No Judgment"] = len(dfj.loc[(dfj["judgement_for"] == "NO JUDGEMENT") & (dfj["precinct"]==str(i))])
#total number of cases
df_graph.at[i,"Total Number of cases"] = len(dfj.loc[dfj["precinct"]==str(i)])
#bar chart for amount
df_bar = df_graph[["Amount Awarded"]]
fig = px.bar (
df_bar,
x = df_bar.index,
y = "Amount Awarded",
labels={
"index": "Justice of the Peace"
},
orientation = "v",
title = "Amounts Awarded by Precinct"
)
display[0].plotly_chart(fig)
#make pie charts FIGURE OUT HOW TO STOP SORTING THESE
df_pie = df_graph[["Judgment For Plaintiff","Judgment For Defendant","No Judgment"]].T
for i in range(1,6):
df_pc = df_pie[i]
fig = px.pie(
df_pc,
values = df_pc.values,
names = df_pc.index,
color = df_pc.values,
color_discrete_map={"Judgment for Plaintiff":"red","Judgment for Defendant":"green","No Judgment":"blue"},
title = f"Precinct {i} Case Outcomes"
)
display[(i)%2].plotly_chart(fig)
display[0].markdown("### Judgment Data")
df_graph["Amount Awarded"] = df_graph["Amount Awarded"].apply(lambda x: '${:,.2f}'.format(float(x)))
display[0].write(df_graph)
def representation_data(df):
display = st.beta_columns(2)
display[0].markdown("## Representation Information")
display[1].markdown("## ")
df_graph = pd.DataFrame()
for i in range(1,6):
#Representation Break downs
df_graph.at[i,"Plaintiffs Attorneys"] = len(df.loc[(df["attorneys_for_plaintiffs"]!= "PRO SE") & (df["attorneys_for_plaintiffs"]!="") & (df["precinct"]==str(i))])
df_graph.at[i,"Defendants Attorneys"] = len(df.loc[(df["attorneys_for_defendants"]!= "PRO SE") & (df["attorneys_for_defendants"]!="") & (df["precinct"]==str(i))])
df_graph.at[i,"Plaintiffs Pro Se"] = len(df.loc[(df["attorneys_for_plaintiffs"]== "PRO SE") & (df["attorneys_for_plaintiffs"]!="") & (df["precinct"]==str(i))])
df_graph.at[i,"Defendants Pro Se"] = len(df.loc[(df["attorneys_for_defendants"]== "PRO SE") & (df["attorneys_for_defendants"]!="") & (df["precinct"]==str(i))])
df_graph.at[i,"Plaintiffs No Rep Data"] = len(df.loc[(df["attorneys_for_defendants"]=="") & (df["precinct"]==str(i))])
df_graph.at[i,"Defendants No Rep Data"] = len(df.loc[(df["attorneys_for_defendants"]=="") & (df["precinct"]==str(i))])
#total number of cases
df_graph.at[i,"Total Number of Cases"] = len(df.loc[df["precinct"]==str(i)])
# display[0].markdown("### Defendant Representation Counts")
display[0].markdown("### Representation Counts")
df_graph = df_graph.astype(int)
display[0].write(df_graph)
# display[0].markdown("### Defendant Representation Counts")
# display[0].write(df_graph[["Defendants Attorneys","Defendants Pro Se","Defendants No Rep","Total Number of Cases"]])
# display[1].markdown("### Plaintiff Representation Counts")
# display[1].write(df_graph[["Plaintiffs Attorneys","Plaintiffs Pro Se","Plaintiffs No Rep","Total Number of Cases"]])
#display[0].markdown("### Representation Bar Graph")
# fig = px.bar(df_graph,x=df_graph.index,y=["Defendants Attorneys","Plaintiffs Attorneys","Defendants Pro Se","Plaintiffs Pro Se"])
#display[0].plotly_chart(fig)
#top plaintiff attorneys
df_a = df[(df["attorneys_for_plaintiffs"]!="PRO SE") & (df["attorneys_for_plaintiffs"]!="")]
df_af = df_a.groupby("attorneys_for_plaintiffs").count()["case_number"].sort_values(ascending=False)
display[0].markdown("### Top Plaintiff Attorneys")
display[0].write(df_af)
def plaintiff_data(df_ef):
    #determine top plaintiffs
display = st.beta_columns(2)
display[0].markdown("## Top Plaintiffs")
display[1].markdown("## ")
df = df_ef.groupby("plaintiff").count()["case_number"]
df= df.sort_values(ascending=False)
display[0].write(df)
pass
def property_data(df_ef):
display = st.beta_columns(2)
display[0].markdown("## Property Data")
display[1].markdown("## ")
#determine top properties
df_prop = df_ef[["parcel_id","code_complaints_count","code_violations_count","current_property_owner","dba","2016_unit_count","lon","lat"]]
#get rid of unmatched entries
df_prop = df_prop[df_prop["parcel_id"]!=""]
#determine counts
df1 = df_prop.groupby("parcel_id").count()["dba"]
df1.columns = "Eviction Count"
#get rid of duplicate ids since we already counted them
df_props = df_prop.drop_duplicates("parcel_id")
#merge counts back in and create final data frame
df_props = df_props.merge(df1,left_on="parcel_id",right_index=True)
    #drop unneeded columns and rename
df_pf = df_props[["dba_x","dba_y","parcel_id"]]
df_pf.columns = ["DBA","Eviction Count","Parcel ID"]
df_pf.sort_values("Eviction Count",ascending=False,inplace=True)
#sort and take top 25
display[0].markdown("## Top Properties by Eviction")
display[0].write(df_pf)
#map properties?
df_props["lon"] =
|
pd.to_numeric(df_props["lon"])
|
pandas.to_numeric
|
# -*- coding: utf-8 -*-
# @Author : gaodi12
# @Email : <EMAIL>
# @License: Mulan PSL v2
# @Date : 2022-12-20 13:46:29
import time
import pandas as pd
from flask import jsonify, g
from server import db, redis_client
from server.model.task import TaskDistributeTemplate, DistributeTemplateType, \
Task, TaskStatus, TaskParticipant, TaskMilestone, TaskManualCase
from server.model.testcase import Suite, Case
from server.model.group import ReUserGroup
from server.utils.page_util import PageUtil
from server.utils.response_util import RET
from server.utils.db import collect_sql_error
from server.utils.redis_util import RedisKey
from server.utils.permission_utils import PermissionManager
from server.utils.read_from_yaml import get_api
from .services import judge_task_automatic
from .handlers import HandlerTask
class HandlerTemplate:
@staticmethod
@collect_sql_error
def get(query):
org_id = redis_client.hget(RedisKey.user(g.gitee_id), 'current_org_id')
rugs = ReUserGroup.query.filter_by(user_gitee_id=g.gitee_id, org_id=org_id, is_delete=False,
user_add_group_flag=True).all()
groups = [item.group_id for item in rugs]
filter_params = [TaskDistributeTemplate.group_id.in_(groups)]
if query.name:
filter_params.append(TaskDistributeTemplate.name.like(f'%{query.name}%'))
if query.group_id:
filter_params.append(TaskDistributeTemplate.group_id == query.group_id)
if query.type_name:
filter_params.append(DistributeTemplateType.name.like(f'%{query.type_name}%'))
query_filter = TaskDistributeTemplate.query.join(DistributeTemplateType).filter(*filter_params)
else:
query_filter = TaskDistributeTemplate.query.filter(*filter_params)
page_dict, e = PageUtil.get_page_dict(query_filter, query.page_num, query.page_size, func=lambda x: x.to_json())
if e:
return jsonify(error_code=RET.SERVER_ERR, error_msg=f'get group page error {e}')
return jsonify(error_code=RET.OK, error_msg="OK", data=page_dict)
@staticmethod
@collect_sql_error
def add(body):
org_id = redis_client.hget(RedisKey.user(g.gitee_id), 'current_org_id')
template = TaskDistributeTemplate(name=body.name, creator_id=g.gitee_id,
group_id=body.group_id, permission_type='group', org_id=org_id)
if body.types:
for item in body.types:
dtt = DistributeTemplateType()
dtt.name = item.name
dtt.executor_id = item.executor_id
dtt.creator_id = g.gitee_id
dtt.group_id = template.group_id
dtt.permission_type = template.permission_type
dtt.suites = ','.join(item.suites)
dtt.helpers = ','.join(item.helpers) if item.helpers else ''
template.types.append(dtt)
template.add_update()
_data = {
"permission_type": template.permission_type,
"group_id": template.group_id,
}
scope_data_allow, scope_data_deny = get_api("task", "template.yaml", "template", template.id)
PermissionManager().generate(scope_datas_allow=scope_data_allow, scope_datas_deny=scope_data_deny,
_data=_data)
for _type in template.types:
scope_data_allow, scope_data_deny = get_api("task", "type.yaml", "type", _type.id)
PermissionManager().generate(scope_datas_allow=scope_data_allow, scope_datas_deny=scope_data_deny,
_data=_data)
return jsonify(error_code=RET.OK, error_msg="OK")
@staticmethod
@collect_sql_error
def update(template_id, body):
template = TaskDistributeTemplate.query.get(template_id)
for key, value in body.dict().items():
if hasattr(template, key) and value is not None:
setattr(template, key, value)
template.add_update()
return jsonify(error_code=RET.OK, error_msg="OK")
@staticmethod
@collect_sql_error
def delete(template_id):
template = TaskDistributeTemplate.query.get(template_id)
for item in template.types:
HandlerTask.unbind_role_table("task", "type", item)
db.session.delete(item)
db.session.commit()
HandlerTask.unbind_role_table("task", "template", template)
db.session.delete(template)
db.session.commit()
return jsonify(error_code=RET.OK, error_msg="OK")
class HandlerTemplateType:
@staticmethod
def get_all_suites(template: TaskDistributeTemplate):
template_suites = []
for item in template.types:
template_suites = template_suites + item.suites.split(',') if item.suites else template_suites
return template_suites
@staticmethod
@collect_sql_error
def get(query):
filter_params = [Suite.deleted.is_(False)]
if query.type_id:
return HandlerTemplateType.get_info(query.type_id)
if query.template_id:
template = TaskDistributeTemplate.query.get(query.template_id)
template_suites = HandlerTemplateType.get_all_suites(template)
filter_params.append(Suite.id.notin_(template_suites))
query_filter = Suite.query.filter(*filter_params)
page_dict, e = PageUtil.get_page_dict(query_filter, query.page_num, query.page_size, func=lambda x: x.to_json())
if e:
return jsonify(error_code=RET.SERVER_ERR, error_msg=f'get group page error {e}')
return jsonify(error_code=RET.OK, error_msg="OK", data=page_dict)
@staticmethod
@collect_sql_error
def get_info(type_id):
return_data = DistributeTemplateType.query.get(type_id)
return jsonify(error_code=RET.OK, error_msg="OK", data=return_data.to_json())
@staticmethod
@collect_sql_error
def add(template_id, body):
template = TaskDistributeTemplate.query.get(template_id)
if body.name in [item.name for item in template.types]:
            return jsonify(error_code=RET.PARMA_ERR, error_msg='name already exists')
dtt = DistributeTemplateType()
dtt.name = body.name
dtt.creator_id = g.gitee_id
dtt.executor_id = body.executor_id
dtt.group_id = template.group_id
dtt.permission_type = template.permission_type
dtt.suites = ','.join(body.suites)
dtt.helpers = ','.join(body.helpers)
dtt_id = dtt.add_flush_commit_id()
template.types.append(dtt)
template.add_update()
_data = {
"permission_type": template.permission_type,
"group_id": template.group_id,
}
scope_data_allow, scope_data_deny = get_api("task", "type.yaml", "type", dtt_id)
PermissionManager().generate(scope_datas_allow=scope_data_allow, scope_datas_deny=scope_data_deny,
_data=_data)
return jsonify(error_code=RET.OK, error_msg="OK")
@staticmethod
@collect_sql_error
def update(type_id, body):
dtt = DistributeTemplateType.query.get(type_id)
for key, value in body.dict().items():
if hasattr(dtt, key) and value is not None:
setattr(dtt, key, value)
dtt.add_update()
return jsonify(error_code=RET.OK, error_msg="OK")
@staticmethod
@collect_sql_error
def delete(type_id):
dtt = DistributeTemplateType.query.get(type_id)
db.session.delete(dtt)
db.session.commit()
HandlerTask.unbind_role_table("task", "type", dtt)
return jsonify(error_code=RET.OK, error_msg="OK")
class HandlerTaskDistributeCass:
def __init__(self):
self.status = TaskStatus.query.filter_by(name="待办中").first()
self.parent_task = None
self.task_milestone = None
@collect_sql_error
def distribute(self, task_id, template_id, body):
        # analyze the incoming data
# milestone_id, case_id, suite_id
task = Task.query.get(task_id)
if not task or not task.group_id or task.type == 'PERSON':
return jsonify(error_code=RET.PARMA_ERR, error_msg='task can not use template distribute cases')
self.parent_task = task
_origin = [_ for _ in self.parent_task.children]
task_milestone = TaskMilestone.query.filter_by(task_id=task_id, milestone_id=body.milestone_id).first()
if not task_milestone:
return jsonify(error_code=RET.PARMA_ERR, error_msg='task milestone relationship no find')
self.task_milestone = task_milestone
task_cases = task_milestone.distribute_df_data(body.distribute_all_cases)
task_cases_df = pd.DataFrame(task_cases,
columns=['milestone_id', 'case_id', 'suite_id', 'case_result'])
# suite_id, executor_id, helpers, type_name
template = TaskDistributeTemplate.query.get(template_id)
if template.group_id != task.group_id:
return jsonify(error_code=RET.PARMA_ERR, error_msg='task group not match template group')
template_cases = []
template_types = []
for item in template.types:
template_types.append(item.name)
if not item.suites:
continue
for suite in item.suites.split(','):
template_cases.append((int(suite), item.executor_id, item.helpers, item.name))
template_cases_df = pd.DataFrame(template_cases, columns=['suite_id', 'executor_id', 'helpers', 'type_name'])
merge_df =
|
pd.merge(task_cases_df, template_cases_df, on='suite_id')
|
pandas.merge
|
import faiss
from utils import MovieDataApp, RetToHive
spark = MovieDataApp().spark
import numpy as np
spark.sql('use movie_recall')
# id = spark.sql('select movie_id from movie_vector_1969').limit(10000)
user_vector_arr = spark.sql('select * from movie_vector_1969').limit(100).toPandas().values[:, 2].tolist()
user_vector_arr = np.asarray(user_vector_arr).astype('float32')
# print(type(user_vector_arr))
# print(user_vector_arr.shape)
# print(user_vector_arr)
# user_vector_arr.printSchema()
gds_vector_arr = spark.sql('select movievector from movie_vector_1969').limit(100).toPandas().values[:, 0].tolist()
gds_vector_arr = np.asarray(gds_vector_arr).astype('float32')
# print(gds_vector_arr.shape)
# print(gds_vector_arr)
# user_vector_arr # shape(1000, 100)
# gds_vector_arr # shape(100, 100)
dim = 100  # vector dimensionality
k = 10  # number of vectors to recall
index = faiss.IndexFlatL2(dim)  # L2 (Euclidean) distance: smaller is better
# index=faiss.IndexFlatIP(dim)  # inner product: with normalized vectors this equals cosine similarity (larger is better)
# print(index.is_trained)
index.add(gds_vector_arr)  # add the candidate vectors to the index
# print(index.ntotal)
D, I = index.search(user_vector_arr, k)  # find similar vectors: I holds the indices of the nearest items, D the distances
print(D[:5])
print(I[-5:])
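# Hedged aside, not required by the recall job above: for the cosine-similarity
# variant mentioned in the comments, L2-normalize the vectors before adding them
# to an IndexFlatIP index; with unit vectors the inner product equals cosine.
gds_norm = gds_vector_arr / np.linalg.norm(gds_vector_arr, axis=1, keepdims=True)
user_norm = user_vector_arr / np.linalg.norm(user_vector_arr, axis=1, keepdims=True)
index_ip = faiss.IndexFlatIP(dim)
index_ip.add(gds_norm.astype('float32'))
D_cos, I_cos = index_ip.search(user_norm.astype('float32'), k)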
import pandas as pd
df = pd.DataFrame(columns=['index'])
df.append(
|
pd.Series([None])
|
pandas.Series
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from config import test_snr_dB
import pandas as pd
from scipy.stats import ttest_1samp
def plot_paper_results(folder_envtfs, folder_stft):
sns.set(style="whitegrid")
df_env = pd.read_csv('models\\' + folder_envtfs + '\\results.csv', sep=';')
df_stft = pd.read_csv('models\\' + folder_stft + '\\results.csv', sep=';')
df_orig = df_env.copy()
df_orig = df_orig.drop(['eSTOI pred.'],axis=1)
df_orig = df_orig.drop(['PESQ pred.'],axis=1)
df_orig = df_orig.rename(columns={'eSTOI orig.':'eSTOI pred.'})
df_orig = df_orig.rename(columns={'PESQ orig.':'PESQ pred.'})
df_orig[' '] = 'Original'
df_env[' '] = 'ENV-TFS'
df_stft[' '] = 'STFT'
df = pd.concat([df_orig, df_stft, df_env])
sns.set(style="ticks",font='STIXGeneral')
fig = plt.figure(figsize=(11, 4.5))
size=16
plt.subplot(121)
ax = sns.boxplot(x='SNR', y='eSTOI pred.', hue=' ', data=df, fliersize=1)
plt.xlabel('SNR (dB)', {'size': size})
plt.ylabel('eSTOI', {'size': size})
ax.legend_.remove()
# ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.8)
ax.tick_params(labelsize=size)
lines, labels = ax.get_legend_handles_labels()
# fig.legend(lines, labels, loc='upper center')
fig.legend(lines, labels, loc='upper center', bbox_to_anchor=(0.53, 0.10), shadow = False, ncol = 3, prop={'size': size-3})
plt.tight_layout()
# plt.savefig('fig4.1_estoi_total.pdf',dpi=2000)
# plt.show()
# plt.figure(figsize=(11, 4.5))
plt.subplot(122)
ax = sns.boxplot(x='SNR', y='PESQ pred.', hue=' ', data=df, fliersize=1)
ax.legend_.remove()
ax.tick_params(labelsize=size)
# ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.11), ncol = 3)
plt.xlabel('SNR (dB)',{'size': size})
plt.ylabel('PESQ', {'size': size})
# ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.8)
plt.tight_layout()
plt.savefig('fig4_estoi_pesq_total.pdf',dpi=2000)
plt.show()
# multi plot
sns.set(style="ticks",font='STIXGeneral',font_scale=1.3)
g = sns.relplot(x="SNR", y="eSTOI pred.", hue = " ", col = "Noise", data = df, kind = "line",
col_wrap=5, height=2.5, aspect=0.8, legend='full')
# plt.tight_layout()
g.fig.subplots_adjust(wspace=0.10)
g.set_ylabels('eSTOI')
g.set_xlabels('SNR (dB)')
g.set(xticks=[-6, 0, 6])
g.set(xlim=(min(test_snr_dB), max(test_snr_dB)))
g.set(ylim=(0, 1))
g.set_titles("{col_name}",)
# for a in g.axes:
# a.axhline(a.get_yticks()[1], alpha=0.5, color='grey')
leg = g._legend
leg.set_bbox_to_anchor([0.84, 0.86]) # coordinates of lower left of bounding box
leg._loc = 1
plt.savefig('fig5_estoi_per_noise.pdf',bbox_inches='tight',dpi=2000)
plt.show()
# eSTOI increase histogram
plt.figure()
ax = sns.distplot(df_env['eSTOI pred.'] - df_env['eSTOI orig.'], kde_kws={"shade": True}, norm_hist=True, label='ENV-TFS')
sns.distplot(df_stft['eSTOI pred.'] - df_stft['eSTOI orig.'], kde_kws={"shade": True}, norm_hist=True, label='STFT')
plt.legend()
vals = ax.get_xticks()
ax.set_xticklabels(['{:,.0%}'.format(x) for x in vals])
plt.xlabel('eSTOI increase')
plt.ylabel('density')
plt.tight_layout()
plt.show()
# PESQ increase per snr histogram
# ax = sns.kdeplot(df_env['SNR'], df_env['PESQ pred.'] - df_env['PESQ orig.'], cmap="Reds", shade=True,shade_lowest=False, label='ENV')
# sns.kdeplot(df_stft['SNR'], df_stft['PESQ pred.'] - df_stft['PESQ orig.'], cmap="Blues", shade=True,shade_lowest=False, label='STFT')
ax = sns.distplot(df_env['PESQ pred.'] - df_env['PESQ orig.'], kde_kws={"shade": True}, norm_hist=True,
label='ENV-TFS')
sns.distplot(df_stft['PESQ pred.'] - df_stft['PESQ orig.'], kde_kws={"shade": True}, norm_hist=True, label='STFT')
plt.legend()
vals = ax.get_xticks()
plt.xlabel('PESQ increase')
plt.ylabel('density')
plt.tight_layout()
plt.show()
return
def plot_matlab_results(folder_envtfs, folder_stft):
df_env1 = pd.read_excel('models\\' + folder_envtfs + '\\HA_1.xls')
df_env2 = pd.read_excel('models\\' + folder_envtfs + '\\HA_2.xls')
df_env3 = pd.read_excel('models\\' + folder_envtfs + '\\HA_3.xls')
df_env4 = pd.read_excel('models\\' + folder_envtfs + '\\HA_4.xls')
df_env5 = pd.read_excel('models\\' + folder_envtfs + '\\HA_5.xls')
df_env6 = pd.read_excel('models\\' + folder_envtfs + '\\HA_6.xls')
df_stft1 = pd.read_excel('models\\' + folder_stft + '\\HA_1.xls')
df_stft2 = pd.read_excel('models\\' + folder_stft + '\\HA_2.xls')
df_stft3 =
|
pd.read_excel('models\\' + folder_stft + '\\HA_3.xls')
|
pandas.read_excel
|
"""
An exhaustive list of pandas methods exercising NDFrame.__finalize__.
"""
import operator
import re
import numpy as np
import pytest
import pandas as pd
# TODO:
# * Binary methods (mul, div, etc.)
# * Binary outputs (align, etc.)
# * top-level methods (concat, merge, get_dummies, etc.)
# * window
# * cumulative reductions
not_implemented_mark = pytest.mark.xfail(reason="not implemented")
mi = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=["A", "B"])
frame_data = ({"A": [1]},)
frame_mi_data = ({"A": [1, 2, 3, 4]}, mi)
# Tuple of
# - Callable: Constructor (Series, DataFrame)
# - Tuple: Constructor args
# - Callable: pass the constructed value with attrs set to this.
_all_methods = [
(
pd.Series,
(np.array([0], dtype="float64")),
operator.methodcaller("view", "int64"),
),
(pd.Series, ([0],), operator.methodcaller("take", [])),
(pd.Series, ([0],), operator.methodcaller("__getitem__", [True])),
(pd.Series, ([0],), operator.methodcaller("repeat", 2)),
pytest.param(
(pd.Series, ([0],), operator.methodcaller("reset_index")),
marks=pytest.mark.xfail,
),
(pd.Series, ([0],), operator.methodcaller("reset_index", drop=True)),
pytest.param(
(pd.Series, ([0],), operator.methodcaller("to_frame")), marks=pytest.mark.xfail
),
(pd.Series, ([0, 0],), operator.methodcaller("drop_duplicates")),
(pd.Series, ([0, 0],), operator.methodcaller("duplicated")),
(pd.Series, ([0, 0],), operator.methodcaller("round")),
(pd.Series, ([0, 0],), operator.methodcaller("rename", lambda x: x + 1)),
(pd.Series, ([0, 0],), operator.methodcaller("rename", "name")),
(pd.Series, ([0, 0],), operator.methodcaller("set_axis", ["a", "b"])),
(pd.Series, ([0, 0],), operator.methodcaller("reindex", [1, 0])),
(pd.Series, ([0, 0],), operator.methodcaller("drop", [0])),
(pd.Series, (pd.array([0, pd.NA]),), operator.methodcaller("fillna", 0)),
(pd.Series, ([0, 0],), operator.methodcaller("replace", {0: 1})),
(pd.Series, ([0, 0],), operator.methodcaller("shift")),
(pd.Series, ([0, 0],), operator.methodcaller("isin", [0, 1])),
(pd.Series, ([0, 0],), operator.methodcaller("between", 0, 2)),
(pd.Series, ([0, 0],), operator.methodcaller("isna")),
(pd.Series, ([0, 0],), operator.methodcaller("isnull")),
(pd.Series, ([0, 0],), operator.methodcaller("notna")),
(pd.Series, ([0, 0],), operator.methodcaller("notnull")),
(pd.Series, ([1],), operator.methodcaller("add", pd.Series([1]))),
# TODO: mul, div, etc.
(
pd.Series,
([0],
|
pd.period_range("2000", periods=1)
|
pandas.period_range
|
#!/usr/bin/env python
# coding: utf-8
# # Assignment 1: Tier of the bottom-lane duo
# ## Libraries and data loading
# In[937]:
import requests
import json
import pandas as pd
import numpy as np
from pandas.io.json import json_normalize
import warnings
warnings.filterwarnings(action='ignore')
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import math
# In[4]:
url='*****************************************************'
# In[ ]:
adc_sup_pick_red
# In[ ]:
lol_data=data.text
lol_data=lol_data.replace('\n', ',\n')
lol_data='['+lol_data+']'
lol_data=lol_data.replace(']},\n]',']}\n]')
# In[ ]:
f = open("data.txt", 'w')
f.write(lol_data)
f.close()
# In[ ]:
lol_data=json.loads(lol_data)
# In[ ]:
output_df=json_normalize(lol_data)
# In[790]:
sample=output_df
sample.reset_index(inplace=True)
del sample['index']
del sample['Unnamed: 0']
sample
# ## Data preprocessing
# ### teams
# #### Brief information on bans and objectives
# In[756]:
def array_on_duplicate_keys(ordered_pairs):
d = {}
for k, v in ordered_pairs:
if k in d:
if type(d[k]) is list:
d[k].append(v)
else:
d[k] = [d[k],v]
else:
d[k] = v
return d
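# In[ ]:
# Hedged illustration (extra cell, not in the original flow): with the hook
# above, duplicate keys in a JSON object are collected into a list instead of
# the default last-one-wins behaviour.
json.loads('{"championid": 1, "championid": 2}',
           object_pairs_hook=array_on_duplicate_keys)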
# In[757]:
teams_output = pd.DataFrame(columns = ['firstdragon', 'firstinhibitor', 'pickturn', 'championid', 'baronkills',
'firstriftherald', 'firstbaron', 'riftheraldkills', 'firstblood',
'teamid', 'firsttower', 'vilemawkills', 'inhibitorkills', 'towerkills',
'dominionvictoryscore', 'win', 'dragonkills'])
def split_list(a_list):
half = len(a_list)//2
return a_list[:][:half], a_list[:][half:]
# In[758]:
for i in range(len(sample)):
test=sample['teams'][i]
test=test.replace("'", "\"").replace('[{','').replace('}]','').replace('}, {', ', ').replace(' "bans":','').replace('False','\"False\"').replace('True','\"True\"')
test='[{' + test+ '}]'
test=json.loads(test, object_pairs_hook=array_on_duplicate_keys)
test=json_normalize(test)
teams_output=pd.concat([teams_output,test])
teams_output.reset_index(inplace=True)
del teams_output['index']
teams_output.head()
# In[759]:
a=[]
b=[]
teams_output_blue=pd.DataFrame()
teams_output_red=pd.DataFrame()
for i in range(teams_output.shape[0]):
for j in range(teams_output.shape[1]):
A,B=split_list(teams_output.iloc[i][j])
a.append(A)
b.append(B)
teams_output_blue=pd.concat([teams_output_blue,pd.DataFrame(pd.Series(a)).transpose()])
teams_output_red=pd.concat([teams_output_red,pd.DataFrame(pd.Series(b)).transpose()])
a=[]
b=[]
teams_output_blue.columns=teams_output.columns
teams_output_red.columns=teams_output.columns
teams_output_blue.reset_index(inplace=True)
teams_output_red.reset_index(inplace=True)
teams_output_blue=teams_output_blue.rename({'championid':'championid_ban'},axis='columns')
teams_output_red=teams_output_red.rename({'championid':'championid_ban'},axis='columns')
del teams_output_blue['index']
del teams_output_red['index']
# In[760]:
teams_output_blue.head()
# ### participants
# #### Detailed information on team champions, objectives, and kills
# In[761]:
participants_output=pd.DataFrame()
for i in range(len(sample)):
test=sample['participants'][i]
test=test.replace("'", "\"").replace('[{','').replace('}]','').replace('}, {', ', ').replace(' "bans":','').replace('False','\"False\"').replace('True','\"True\"')
test='[{' + test+ '}]'
test=json.loads(test, object_pairs_hook=array_on_duplicate_keys)
test=json_normalize(test)
participants_output=pd.concat([participants_output,test])
participants_output.reset_index(inplace=True)
del participants_output['index']
participants_output.head()
# In[762]:
participants_output_if=pd.DataFrame(columns=['championid', 'kills', 'deaths', 'assists'])
for i in range(len(participants_output)):
participants_output_if = participants_output_if.append(pd.DataFrame([[participants_output['championid'][i],
list(json_normalize(participants_output['stats'][i])['kills']),
list(json_normalize(participants_output['stats'][i])['deaths']),
list(json_normalize(participants_output['stats'][i])['assists'])]], columns=['championid', 'kills', 'deaths', 'assists']), ignore_index=True)
# In[763]:
a=[]
b=[]
participants_output_if_blue=pd.DataFrame()
participants_output_if_red=pd.DataFrame()
for i in range(participants_output_if.shape[0]):
for j in range(participants_output_if.shape[1]):
A,B=split_list(participants_output_if.iloc[i][j])
a.append(A)
b.append(B)
participants_output_if_blue=pd.concat([participants_output_if_blue,pd.DataFrame(pd.Series(a)).transpose()])
participants_output_if_red=pd.concat([participants_output_if_red,pd.DataFrame(pd.Series(b)).transpose()])
a=[]
b=[]
participants_output_if_blue.columns=participants_output_if.columns
participants_output_if_red.columns=participants_output_if.columns
participants_output_if_blue.reset_index(inplace=True)
participants_output_if_red.reset_index(inplace=True)
del participants_output_if_blue['index']
del participants_output_if_red['index']
# In[764]:
participants_output_if_blue.head()
# ### gameduration
# #### Game duration
# In[765]:
sample['gameduration'].head()
# ### participantextendedstats
# #### Tiers of the players in the game
# In[766]:
participantextendedstats_output=pd.DataFrame()
for i in range(len(sample)):
test=sample['participantextendedstats'][i]
test=test.replace("'", "\"").replace('[{','').replace('}]','').replace('}, {', ', ').replace(' "bans":','').replace('False','\"False\"').replace('True','\"True\"')
test='[{' + test+ '}]'
test=json.loads(test, object_pairs_hook=array_on_duplicate_keys)
test=json_normalize(test)
participantextendedstats_output=
|
pd.concat([participantextendedstats_output,test])
|
pandas.concat
|
from datetime import datetime
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
def test_case(c, df):
result_df = c.sql(
"""
SELECT
(CASE WHEN a = 3 THEN 1 END) AS "S1",
(CASE WHEN a > 0 THEN a ELSE 1 END) AS "S2",
(CASE WHEN a = 4 THEN 3 ELSE a + 1 END) AS "S3",
(CASE WHEN a = 3 THEN 1 WHEN a > 0 THEN 2 ELSE a END) AS "S4",
CASE
WHEN (a >= 1 AND a < 2) OR (a > 2) THEN CAST('in-between' AS VARCHAR) ELSE CAST('out-of-range' AS VARCHAR)
END AS "S5",
CASE
WHEN (a < 2) OR (3 < a AND a < 4) THEN 42 ELSE 47
END AS "S6",
CASE WHEN (1 < a AND a <= 4) THEN 1 ELSE 0 END AS "S7"
FROM df
"""
)
result_df = result_df.compute()
expected_df = pd.DataFrame(index=df.index)
expected_df["S1"] = df.a.apply(lambda a: 1 if a == 3 else pd.NA)
expected_df["S2"] = df.a.apply(lambda a: a if a > 0 else 1)
expected_df["S3"] = df.a.apply(lambda a: 3 if a == 4 else a + 1)
expected_df["S4"] = df.a.apply(lambda a: 1 if a == 3 else 2 if a > 0 else a)
expected_df["S5"] = df.a.apply(
lambda a: "in-between" if ((1 <= a < 2) or (a > 2)) else "out-of-range"
)
expected_df["S6"] = df.a.apply(lambda a: 42 if ((a < 2) or (3 < a < 4)) else 47)
expected_df["S7"] = df.a.apply(lambda a: 1 if (1 < a <= 4) else 0)
# Do not check dtypes, as pandas versions are inconsistent here
assert_frame_equal(result_df, expected_df, check_dtype=False)
def test_literals(c):
df = c.sql(
"""SELECT 'a string äö' AS "S",
4.4 AS "F",
-4564347464 AS "I",
TIME '08:08:00.091' AS "T",
TIMESTAMP '2022-04-06 17:33:21' AS "DT",
DATE '1991-06-02' AS "D",
INTERVAL '1' DAY AS "IN"
"""
)
df = df.compute()
expected_df = pd.DataFrame(
{
"S": ["a string äö"],
"F": [4.4],
"I": [-4564347464],
"T": [pd.to_datetime("1970-01-01 08:08:00.091")],
"DT": [pd.to_datetime("2022-04-06 17:33:21")],
"D": [pd.to_datetime("1991-06-02 00:00")],
"IN": [pd.to_timedelta("1d")],
}
)
assert_frame_equal(df, expected_df)
def test_literal_null(c):
df = c.sql(
"""
SELECT NULL AS "N", 1 + NULL AS "I"
"""
)
df = df.compute()
expected_df = pd.DataFrame({"N": [pd.NA], "I": [pd.NA]})
expected_df["I"] = expected_df["I"].astype("Int32")
assert_frame_equal(df, expected_df)
def test_random(c, df):
result_df = c.sql(
"""
SELECT RAND(0) AS "0", RAND_INTEGER(1, 10) AS "1"
"""
)
result_df = result_df.compute()
# As the seed is fixed, this should always give the same results
expected_df = pd.DataFrame({"0": [0.26183678695392976], "1": [8]})
expected_df["1"] = expected_df["1"].astype("Int32")
assert_frame_equal(result_df, expected_df)
result_df = c.sql(
"""
SELECT RAND(42) AS "R" FROM df WHERE RAND(0) < b
"""
)
result_df = result_df.compute()
assert len(result_df) == 659
assert list(result_df["R"].iloc[:5]) == [
0.5276488824980542,
0.17861463145673728,
0.33764733440490524,
0.6590485298464198,
0.08554137165307785,
]
# If we do not fix the seed, we can just test if it works at all
result_df = c.sql(
"""
SELECT RAND() AS "0", RAND_INTEGER(10) AS "1"
"""
)
result_df = result_df.compute()
def test_not(c, string_table):
df = c.sql(
"""
SELECT
*
FROM string_table
WHERE NOT a LIKE '%normal%'
"""
)
df = df.compute()
expected_df = string_table[~string_table.a.str.contains("normal")]
assert_frame_equal(df, expected_df)
def test_operators(c, df):
result_df = c.sql(
"""
SELECT
a * b AS m,
-a AS u,
a / b AS q,
a + b AS s,
a - b AS d,
a = b AS e,
a > b AS g,
a >= b AS ge,
a < b AS l,
a <= b AS le,
a <> b AS n
FROM df
"""
)
result_df = result_df.compute()
expected_df = pd.DataFrame(index=df.index)
expected_df["m"] = df["a"] * df["b"]
expected_df["u"] = -df["a"]
expected_df["q"] = df["a"] / df["b"]
expected_df["s"] = df["a"] + df["b"]
expected_df["d"] = df["a"] - df["b"]
expected_df["e"] = df["a"] == df["b"]
expected_df["g"] = df["a"] > df["b"]
expected_df["ge"] = df["a"] >= df["b"]
expected_df["l"] = df["a"] < df["b"]
expected_df["le"] = df["a"] <= df["b"]
expected_df["n"] = df["a"] != df["b"]
assert_frame_equal(result_df, expected_df)
def test_like(c, string_table):
df = c.sql(
"""
SELECT * FROM string_table
WHERE a SIMILAR TO '%n[a-z]rmal st_i%'
"""
).compute()
assert_frame_equal(df, string_table.iloc[[0]])
df = c.sql(
"""
SELECT * FROM string_table
WHERE a LIKE '%n[a-z]rmal st_i%'
"""
).compute()
assert len(df) == 0
df = c.sql(
"""
SELECT * FROM string_table
WHERE a LIKE 'Ä%Ä_Ä%' ESCAPE 'Ä'
"""
).compute()
|
assert_frame_equal(df, string_table.iloc[[1]])
|
pandas.testing.assert_frame_equal
|
from typing import Tuple, Union, List
import abc
import logging
from pathlib import Path
import numpy as np
import pandas as pd
import spacy
import cv2
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.base import TransformerMixin, BaseEstimator
from dermclass_models.config import StructuredConfig, ImageConfig, TextConfig
from dermclass_models.validation import validate_variables
DataFrame = pd.DataFrame
Series = pd.Series
Dataset = tf.data.Dataset
Sequential = tf.keras.models.Sequential
class _SklearnPreprocessors(abc.ABC):
def __init__(self, config):
"""
        An abstract class for preprocessing data with sklearn
:param config: Config object for the class
"""
validate_variables(config)
self.config = config
self.logger = logging.getLogger(__name__)
self.df = pd.DataFrame
self.x = pd.DataFrame
self.y = pd.Series
self.x_train = pd.DataFrame
self.x_test = pd.DataFrame
self.y_train = pd.Series
self.y_test = pd.Series
@abc.abstractmethod
def _load_structured_data(self, path: Path) -> DataFrame:
"""
Abstract method for loading structured data
:param path: Path to data directory
:return: Returns a pandas DataFrame with loaded data
"""
return DataFrame()
def _split_target_structured(self, df: DataFrame = None, target_col: str = None)\
-> Tuple[DataFrame, Series]:
"""
Utility function to split target column from pandas DataFrame
:param df: A pandas DataFrame to split the target column from
:param target_col: Name of column with target data
:return: Returns a tuple of pandas DataFrame and pandas Series with target data
"""
if isinstance(df, DataFrame):
df = df
elif df is None:
df = self.df
target_col = target_col or self.config.TARGET
validate_variables(df, target_col)
x = df.drop(target_col, 1)
y = df[target_col]
self.x = x
self.y = y
self.logger.info("Successfully splat the target")
return x, y
def _split_train_test_structured(self,
x: DataFrame = None, y: Series = None,
test_size: float = 0.2, random_state: int = 42)\
-> Tuple[DataFrame, DataFrame, Series, Series]:
if isinstance(x, pd.DataFrame):
x = x
elif x is None:
x = self.x
if isinstance(y, pd.DataFrame):
y = y
elif y is None:
y = self.y
test_size = test_size or self.config.TEST_SIZE
random_state = random_state or self.config.SEED
validate_variables(x, y, test_size, random_state)
x_train, x_test, y_train, y_test = train_test_split(x, y,
test_size=test_size,
random_state=random_state)
self.x_train = x_train
self.x_test = x_test
self.y_train = y_train
self.y_test = y_test
self.logger.info("Successfully splat train and test data")
return x_train, x_test, y_train, y_test
def _load_data_structured(self, df: DataFrame = None) -> Tuple[DataFrame, DataFrame, Series, Series]:
x, y = self._split_target_structured(df)
x_train, x_test, y_train, y_test = self._split_train_test_structured(x, y)
self.logger.info("Successfully loaded the data")
return x_train, x_test, y_train, y_test
class _TfPreprocessors(abc.ABC):
def __init__(self, config):
"""
        An abstract class for preprocessing data with tensorflow
:param config: Config object for the class
"""
validate_variables(config)
self.config = config
self.logger = logging.getLogger(__name__)
self.train_dataset = Dataset
self.validation_dataset = Dataset
self.test_dataset = Dataset
self.prefetch = False
def _split_train_test_tf(self, train_dataset: Dataset = None, validation_dataset: Dataset = None):
"""
        Utility function to split test data off the validation dataset and put the datasets into prefetch mode. Used to reduce
        the neural-net input bottleneck during the data-loading stage
        :param train_dataset: A train dataset
        :param validation_dataset: A validation dataset to be split into validation and test datasets of equal size
        :return: Returns prefetched train, validation and test datasets
"""
train_dataset = train_dataset or self.train_dataset
validation_dataset = validation_dataset or self.validation_dataset
validate_variables(train_dataset, validation_dataset)
validation_batches = tf.data.experimental.cardinality(validation_dataset)
test_dataset = validation_dataset.take(validation_batches // 2)
validation_dataset = validation_dataset.skip(validation_batches // 2)
if self.prefetch:
train_dataset = train_dataset.cache().prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
validation_dataset = validation_dataset.cache().prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
test_dataset = test_dataset.cache().prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
self.train_dataset = train_dataset
self.validation_dataset = validation_dataset
self.test_dataset = test_dataset
self.logger.info(f"Successfully prefetched train, test and validation datasets")
self.logger.info(f'Number of train batches: {tf.data.experimental.cardinality(train_dataset)}\
Number of validation batches: {tf.data.experimental.cardinality(validation_dataset)}\
Number of test batches: {tf.data.experimental.cardinality(test_dataset)}')
return train_dataset, validation_dataset, test_dataset
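def _take_skip_split_example():
    # Hedged sketch with synthetic data of the split performed in
    # _split_train_test_tf above: half of the validation batches are carved
    # off as a test set using cardinality / take / skip.
    ds = tf.data.Dataset.range(10).batch(2)           # 5 batches
    n_batches = tf.data.experimental.cardinality(ds)  # -> 5
    test_ds = ds.take(n_batches // 2)                 # 2 batches
    val_ds = ds.skip(n_batches // 2)                  # 3 batches
    return val_ds, test_ds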
class StructuredPreprocessor(_SklearnPreprocessors):
def __init__(self, config: StructuredConfig = StructuredConfig):
"""
A class for preprocessing structured data
:param config: Config object for the class
"""
validate_variables(config)
super().__init__(config)
def _load_structured_data(self, path: Path = None) -> DataFrame:
"""
        Utility function to load structured data from the csv
:param path: Path to data file
:return: Returns a pandas DataFrame with data loaded
"""
path = path or self.config.DATA_PATH
validate_variables(path)
df = pd.read_csv(path)
self.df = df
self.logger.info("Successfully loaded data from csv")
return df
def load_data(self, path: Path = None) -> Tuple[DataFrame, DataFrame, Series, Series]:
"""
Function to load structured data using sklearn
:param path: Path to data directory
:return: Returns a tuple with x_train, x_test, y_train, y_test data
"""
path = path or self.config.DATA_PATH
validate_variables(path)
df = self._load_structured_data(path)
x_train, x_test, y_train, y_test = self._load_data_structured(df)
return x_train, x_test, y_train, y_test
class ImagePreprocessors(_TfPreprocessors):
def __init__(self, config: ImageConfig = ImageConfig):
"""
A class for preprocessing image data
:param config: Config object for the class
"""
super().__init__(config)
self.config = config
self.logger = logging.getLogger(__name__)
self.model = None
self.img_size = ()
self.img_shape = ()
def _get_avg_img_size(self, path: Path = None) -> Tuple[int, int]:
"""
Utility function to get average image size from your data, necessary to choose proper EfficientNet version
:param path: Path to data files
:return: Returns a tuple with mean image size from provided image data
"""
path = path or self.config.DATA_PATH
validate_variables(path)
height_list = []
width_list = []
for subclass_dir in path.iterdir():
for img_path in subclass_dir.iterdir():
img = cv2.imread(str(img_path))
height, width, _ = img.shape
height_list.append(height)
width_list.append(width)
mean_height = int(sum(height_list) / len(height_list))
mean_width = int(sum(width_list) / len(width_list))
self.img_size = (mean_height, mean_width)
self.logger.info(f"Mean height is: {mean_height}, mean width is: {mean_width}")
return self.img_size
def _get_efficientnet_and_size(self, img_size: Tuple[int, int] = None) -> Tuple[Tuple[int, int], Sequential]:
"""
Utility function to get proper type of EfficientNet, the version is chosen based on the mean image size
More on: https://keras.io/examples/vision/image_classification_efficientnet_fine_tuning/
:param img_size: Mean input size of image files
:return: Returns a tuple of image_size after changing to right static value and EfficientNet object
"""
img_size = img_size or self.img_size
validate_variables(img_size)
img_size = (img_size[0] + img_size[1]) / 2
if 564 < img_size:
img_size = (600, 600)
model = tf.keras.applications.EfficientNetB7
elif 492 < img_size <= 564:
img_size = (528, 528)
model = tf.keras.applications.EfficientNetB6
else:
img_size = (456, 456)
model = tf.keras.applications.EfficientNetB5
self.img_size = img_size
self.model = model
self.logger.info(f"Chosen model is {model} with img_size {img_size}")
return img_size, model
def _load_dataset(self, batch_size: int = None, data_path: Path = None, img_size: Tuple[int, int] = None)\
-> Tuple[Dataset, Dataset]:
"""
Utility function to load dataset from provided path
:param batch_size: A batch size for the datasets
:param data_path: Path to data directory. Files should be organized for tensorflow's image_dataset_from_directory
:param img_size: Image size for loading the image data
:return: Returns a tuple with train and validation datasets
"""
batch_size = batch_size or self.config.BATCH_SIZE
data_path = data_path or self.config.DATA_PATH
img_size = img_size or self.img_size
validate_variables(batch_size, data_path, img_size)
train_dataset = tf.keras.preprocessing.image_dataset_from_directory(directory=data_path,
image_size=img_size,
validation_split=self.config.TEST_SIZE,
batch_size=batch_size,
subset="training",
seed=self.config.SEED,
shuffle=True)
validation_dataset = tf.keras.preprocessing.image_dataset_from_directory(directory=data_path,
image_size=img_size,
validation_split=self.config.TEST_SIZE,
batch_size=batch_size,
subset="validation",
seed=self.config.SEED,
shuffle=True)
        self.train_dataset = train_dataset
self.validation_dataset = validation_dataset
self.logger.info(f"Successfully loaded train and validation datasets ")
return train_dataset, validation_dataset
def load_data(self, path: Path = None) -> Tuple[Dataset, Dataset, Dataset]:
"""
Function to load image data using tensorflow
:param path: Path to data directory
:return: Returns train, validation and test datasets
"""
path = path or self.config.DATA_PATH
validate_variables(path)
img_size = self._get_avg_img_size(path)
img_size, _ = self._get_efficientnet_and_size(img_size)
train_dataset, validation_dataset = self._load_dataset(img_size=img_size, data_path=path)
train_dataset, validation_dataset, test_dataset = self._split_train_test_tf(train_dataset,
validation_dataset)
return train_dataset, validation_dataset, test_dataset
class TextPreprocessors(_SklearnPreprocessors, _TfPreprocessors):
def __init__(self, config: TextConfig = TextConfig):
"""
A class for preprocessing text data
:param config: Config object for the class
"""
_SklearnPreprocessors.__init__(self, config)
_TfPreprocessors.__init__(self, config)
self.config = config
self.logger = logging.getLogger(__name__)
def _load_class_from_dir(self, path: Path) -> DataFrame:
"""
Utility function to load the CSVs from a directory into a single pandas DataFrame
:param path: A path to the directory
:return: Returns a pandas DataFrame with the CSVs loaded from one class directory
"""
validate_variables(path)
class_name = path.name
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
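# --- Added example (not part of the original snippet) ---
# A standalone sketch of the size-to-variant mapping used in _get_efficientnet_and_size()
# above. The 456/528/600 input resolutions are the EfficientNet-B5/B6/B7 defaults from the
# linked Keras tutorial; the 492/564 cut-offs sit halfway between neighbouring resolutions.
import tensorflow as tf

def pick_efficientnet(mean_size: float):
    """Map a mean image size to a static input resolution and an EfficientNet variant."""
    if mean_size > 564:
        return (600, 600), tf.keras.applications.EfficientNetB7
    if mean_size > 492:
        return (528, 528), tf.keras.applications.EfficientNetB6
    return (456, 456), tf.keras.applications.EfficientNetB5

img_size, model_cls = pick_efficientnet((480 + 512) / 2)  # -> (528, 528), EfficientNetB6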
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 24 22:35:51 2021
Helper functions for checking missing data.
Input parameter: a pandas DataFrame
Output: missing-data counts and percentages
@author: Ashish
"""
# import required libraries
import re, os, emoji, numpy as np
import pandas as pd
# Count vectorizer for N-grams
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# NLTK for tokenization and stopwords
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
def find_missing_data_vals(data):
total=data.isnull().sum().sort_values(ascending=False)
percentage=round(total/data.shape[0]*100,2)
return pd.concat([total,percentage],axis=1,keys=['Total','Percentage'])
def categorical_value_counts(data,feature):
total=data.loc[:,feature].value_counts(dropna=False)
percentage=round(data.loc[:,feature].value_counts(dropna=False,normalize=True)*100,2)
return pd.concat([total,percentage],axis=1,keys=['Total','Percentage'])
def find_unique_values_in_column(data,feature):
unique_val=pd.Series(data.loc[:,feature].unique())
return
|
pd.concat([unique_val],axis=1,keys=['Unique Values'])
|
pandas.concat
|
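# --- Added example (not part of the original snippet) ---
# A minimal usage sketch of the helpers above on hypothetical data: missing-value summary,
# category counts, and unique values for a single column.
import numpy as np
import pandas as pd

toy_df = pd.DataFrame({
    "age": [25, np.nan, 40, np.nan],
    "city": ["NY", "LA", None, "NY"],
})
print(find_missing_data_vals(toy_df))             # per-column missing totals and percentages
print(categorical_value_counts(toy_df, "city"))   # value counts (incl. NaN) with percentages
print(find_unique_values_in_column(toy_df, "city"))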
import numpy as np
import pandas as pd
from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Activation, BatchNormalization, Dense, Input
from keras.models import Model
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.linear_model import ARDRegression, Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
def root_mean_squared_error(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_true - y_pred)))
if __name__ == "__main__":
NUM_FOLDS = 50
SEED = 1000
shigeria_pred1 = np.load("shigeria_pred1.npy")
shigeria_pred2 = np.load("shigeria_pred2.npy")
shigeria_pred3 = np.load("shigeria_pred3.npy")
shigeria_pred4 = np.load("shigeria_pred4.npy")
shigeria_pred5 = np.load("shigeria_pred5.npy")
shigeria_pred6 = np.load("shigeria_pred6.npy")
shigeria_pred7 = np.load("shigeria_pred7.npy")
shigeria_pred8 = np.load("shigeria_pred8.npy")
shigeria_pred9 = np.load("shigeria_pred9.npy")
shigeria_pred10 = np.load("shigeria_pred10.npy")
upura_pred = np.load("upura_pred.npy")
takuoko_exp085 = np.load("takuoko_exp085.npy")
takuoko_exp096 = np.load("takuoko_exp096.npy")
takuoko_exp105 = np.load("takuoko_exp105.npy")
takuoko_exp108 = np.load("takuoko_exp108.npy")
takuoko_exp184 = np.load("takuoko_exp184.npy")
X_train_svd = np.load("X_train_all.npy")
X_test_svd = np.load("X_test_all.npy")
train_idx = np.load("train_idx.npy", allow_pickle=True)
svd1 = TruncatedSVD(n_components=3, n_iter=10, random_state=42)
svd1.fit(X_train_svd)
X_train_svd = svd1.transform(X_train_svd)
X_test_svd = svd1.transform(X_test_svd)
X_test = pd.DataFrame(
{
"shigeria_pred1": shigeria_pred1.reshape(-1),
"shigeria_pred2": shigeria_pred2.reshape(-1),
"shigeria_pred3": shigeria_pred3.reshape(-1),
"shigeria_pred4": shigeria_pred4.reshape(-1),
"shigeria_pred5": shigeria_pred5.reshape(-1),
"shigeria_pred6": shigeria_pred6.reshape(-1),
"shigeria_pred7": shigeria_pred7.reshape(-1),
"shigeria_pred8": shigeria_pred8.reshape(-1),
"shigeria_pred9": shigeria_pred9.reshape(-1),
"shigeria_pred10": shigeria_pred10.reshape(-1),
"upura": upura_pred,
"takuoko_exp085": takuoko_exp085,
"takuoko_exp096": takuoko_exp096,
"takuoko_exp105": takuoko_exp105,
"takuoko_exp108": takuoko_exp108,
"takuoko_exp184": takuoko_exp184,
}
)
X_test = pd.concat(
[
X_test,
pd.DataFrame(
X_test_svd, columns=[f"svd_{c}" for c in range(X_test_svd.shape[1])]
),
],
axis=1,
)
# upura oof
pred_val000 = pd.read_csv("../input/commonlit-oof/pred_val000.csv")
# shigeria oof
andrey_df = pd.read_csv("../input/commonlitstackingcsv/roberta_base_itpt.csv")
andrey_df2 =
|
pd.read_csv("../input/commonlitstackingcsv/attention_head_nopre.csv")
|
pandas.read_csv
|
import numpy as np
import pandas as pd
import matplotlib
# matplotlib.use('pgf')
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec
import matplotlib.lines as mlines
c = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
'#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
formatting = {
"DUN": {"color": c[0], "linestyle": "-", "marker": "o", "label": "DUN"},
"ensemble": {"color": c[2], "linestyle": "-.", "marker": "o", "label": "Ensemble"},
"dropout": {"color": c[3], "linestyle": ":", "marker": "o", "label": "Dropout"},
"SGD": {"color": c[1], "linestyle": "--", "marker": "o", "label": "SGD"},
"DUN (exact)": {"color": c[6], "linestyle": (0, [6, 2, 1, 2, 1, 2]), "marker": "o", "label": "DUN (exact)"},
"dropout (0.3)": {"color": c[7], "linestyle": ":", "marker": "p", "label": "Dropout (0.3)"},
"flow ": {"color": c[8], "linestyle": "-", "marker": "o", "label": "flow"},
}
text_width = 5.50107 # in --> Confirmed with template explanation
golden_ratio = (5**.5 - 1) / 2
show_range = 5
ylim = 3
def errorfill(x, y, yerr, color=None, alpha_fill=0.3, line_alpha=1, ax=None, lw=1, linestyle='-', fill_linewidths=0.2):
ax = ax if ax is not None else plt.gca()
# yerr *= 100
if color is None:
color = next(ax._get_lines.prop_cycler)["color"]  # Python 3 / newer-matplotlib form of the old color_cycle.next()
if np.isscalar(yerr) or len(yerr) == len(y):
ymin = y - yerr
ymax = y + yerr
elif len(yerr) == 2:
ymin, ymax = yerr
plt_return = ax.plot(x, y, color=color, lw=lw, linestyle=linestyle, alpha=line_alpha)
ax.fill_between(x, ymax, ymin, color=color, alpha=alpha_fill, linewidths=fill_linewidths)
return plt_return
# Visualize the result
def visualize_uncertainty(savePath, gt_x, gt_y, xdata, mean, var):
plt.figure(dpi=200)
var = np.sqrt(var)
plt.plot(gt_x, gt_y, 'ok', ms=1)
plt.plot(xdata, mean, '-', color='g')
plt.plot(xdata, var, '-', color='r')
plt.ylim([-ylim, ylim])
plt.xlim([-show_range, show_range])
mean = np.array(mean)
var = np.array(var)
plt.fill_between(xdata, mean - var, mean + var, color='g', alpha=0.1)
plt.tight_layout()
plt.savefig(savePath, format='png', bbox_inches='tight')
def plot_err_props(df, conditions, add_cond, ax, formatting, **kwargs):
filt = (df[list(conditions)] == pd.Series(conditions)).all(axis=1)
df_filt = df[filt].dropna(subset=['err_props'])
# df_filt[df_filt.use_no_train_post == True].method = "DUN (exact)"
for idx, method in enumerate(list(add_cond)):
filt_add = (df_filt[list(add_cond[method])] == pd.Series(add_cond[method])).all(axis=1)
df_filt_add = df_filt[filt_add]
meth_filt = method
if "DUN" in meth_filt: meth_filt = "DUN"
err_props_list = np.array([[1 - float(number) for number in row["err_props"][1:-2].split(" ") if number != '']
for _, row in df_filt_add[df_filt_add.method == meth_filt].iterrows()])
mean = np.mean(err_props_list, axis=0)
std = np.std(err_props_list, axis=0)
errorfill(np.arange(0, 1, 0.005), mean, std, alpha_fill=0.2, color=formatting[method]["color"],
linestyle=formatting[method]["linestyle"], ax=ax, **kwargs)
if conditions["dataset"] == "Fashion" or conditions["dataset"] == "MNIST":
rejection_step = np.arange(0, 0.5, 0.005)
theoretical_maximum = 1 / (2 - 2*rejection_step)
elif conditions["dataset"] == "CIFAR10" or conditions["dataset"] == "CIFAR100":
rejection_step = np.arange(0, 1-0.27753, 0.005)
theoretical_maximum = (10000)/((10000 + 26032)*(1-rejection_step))
elif conditions["dataset"] == "SVHN":
rejection_step = np.arange(0, 1-0.72247, 0.005)
theoretical_maximum = (26032)/((10000 + 26032)*(1-rejection_step))
ax.plot(rejection_step, theoretical_maximum, color="k", lw=1, **kwargs)
def plot_rot_stats(df, stat, conditions, add_cond, ax, formatting, **kwargs):
filt = (df[list(conditions)] == pd.Series(conditions)).all(axis=1)
df_filt = df[filt].dropna(subset=[stat]).copy()
df_filt = df_filt[df_filt.corruption == 0.]
df_filt = df_filt[df_filt.dataset == "MNIST"]
for idx, method in enumerate(list(add_cond)):
filt_add = (df_filt[list(add_cond[method])] ==
|
pd.Series(add_cond[method])
|
pandas.Series
|
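# --- Added example (not part of the original snippet) ---
# A minimal usage sketch for errorfill(): plot a mean curve with a shaded +/- 1 std band.
# Passing an explicit color avoids the private colour-cycle lookup inside the helper.
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 1, 50)
y = np.sin(2 * np.pi * x)
yerr = 0.1 + 0.1 * x          # len(yerr) == len(y), so the symmetric-band branch is used

fig, ax = plt.subplots()
errorfill(x, y, yerr, color="C0", ax=ax, lw=1.5)
plt.show()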
import numpy
import pandas
from collections import namedtuple
from _discover import fdr
from .stats import false_discovery_rate
def pairwise_discover_test(x, g=None, alternative="less", fdr_method="DBH"):
"""
Perform many pairwise mutual exclusivity or co-occurrence tests.
Parameters
----------
x : DiscoverMatrix
g : array_like, optional
An optional grouping vector for the rows of `x`. Pairs of rows within
the same group are not tested.
alternative : {'less', 'greater'}, optional
If 'less', a mutual-exclusivity analysis is performed; if 'greater', a
co-occurrence analysis.
fdr_method : {'DBH', 'BH'}, optional
The false discovery rate procedure used for multiple testing correction.
If 'DBH', a Benjamini-Hochberg procedure adapted for discrete test statistics
is used. If 'BH', the standard Benjamini-Hochberg procedure is used. The latter
is much faster, but also more conservative than the discrete version.
Returns
-------
result : PairwiseDiscoverResult
An object containing the test results for all pairwise combinations.
"""
assert alternative in ["less", "greater"]
assert fdr_method in ["DBH", "BH"]
discrete_fdr = fdr_method == "DBH"
events = x.events
bg = x.bg
if g is None:
pFlat, qFlat, pi0 = fdr.mutex(events, bg, alternative == "less", discrete_fdr)
if fdr_method == "BH":
qFlat = false_discovery_rate(pFlat)
pi0 = 1.0
p = numpy.empty((x.shape[0], ) * 2)
p[:] = numpy.nan
p[numpy.triu_indices_from(p, 1)] = pFlat
q = numpy.empty((x.shape[0], ) * 2)
q[:] = numpy.nan
q[numpy.triu_indices_from(p, 1)] = qFlat
else:
i = numpy.argsort(g)
levels, inverse = numpy.unique(g, return_inverse=True)
blockSizes = numpy.bincount(inverse)
p, q, pi0 = fdr.analyseblockstructure(events[i], bg[i], alternative == "less", blockSizes, discrete_fdr)
if fdr_method == "BH":
q = false_discovery_rate(p)
pi0 = 1.0
j = numpy.argsort(i)
p = p[j[:, numpy.newaxis], j]
q = q[j[:, numpy.newaxis], j]
p = pandas.DataFrame(p, index=x.rownames, columns=x.rownames)
q =
|
pandas.DataFrame(q, index=x.rownames, columns=x.rownames)
|
pandas.DataFrame
|
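# --- Added example (not part of the original snippet) ---
# The 'BH' option above refers to the standard Benjamini-Hochberg step-up procedure. A generic
# sketch of BH-adjusted p-values (for illustration only; not the package's own
# false_discovery_rate implementation):
import numpy as np

def benjamini_hochberg(pvals):
    pvals = np.asarray(pvals, dtype=float)
    n = pvals.size
    order = np.argsort(pvals)
    scaled = pvals[order] * n / np.arange(1, n + 1)
    # enforce monotonicity from the largest p-value downwards, then undo the sort
    adjusted = np.minimum.accumulate(scaled[::-1])[::-1]
    out = np.empty(n)
    out[order] = np.clip(adjusted, 0.0, 1.0)
    return out

print(benjamini_hochberg([0.001, 0.008, 0.039, 0.041, 0.9]))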
# -*- coding: utf-8 -*-
"""
The data module contains tools for preprocessing data. It allows users to merge timeseries, compute
daily and monthly summary statistics, and get seasonal periods of a time series.
"""
from __future__ import division
import pandas as pd
from numpy import inf, nan
__all__ = ['julian_to_gregorian', 'merge_data', 'daily_average', 'daily_std_error', 'daily_std_dev', 'monthly_average',
'monthly_std_error', 'monthly_std_dev', 'remove_nan_df', 'seasonal_period']
def julian_to_gregorian(dataframe, frequency=None, inplace=False):
"""
Converts the index of the merged dataframe from julian float values to gregorian datetime
values.
Parameters
----------
dataframe: Pandas DataFrame
A DataFrame with an index of type float
frequency: string
Optional. Sometimes when converting from julian to gregorian there will be rounding errors
due to the inability of computers to store floats as perfect decimals. Providing the
frequency will automatically attempt to round the dates. A list of all the frequencies pandas provides is found
`here <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases/>`_. Common frequencies
include daily ("D") and hourly ("H").
inplace: bool
Default False. If True, will modify the index of the dataframe in place rather than
creating a copy and returning the copy. Use when the time series are very long and making
a copy would take a large amount of memory
Returns
-------
Pandas DataFrame
A pandas DataFrame with gregorian index.
Examples
--------
>>> import pandas as pd
>>> import hydrostats.data as hd
>>> import numpy as np
>>> # The julian dates in an array
>>> julian_dates = np.array([2444239.5, 2444239.5416666665, 2444239.5833333335, 2444239.625,
>>> 2444239.6666666665, 2444239.7083333335, 2444239.75,
>>> 2444239.7916666665, 2444239.8333333335, 2444239.875])
>>> # Creating a test dataframe
>>> test_df = pd.DataFrame(data=np.random.rand(10, 2), # Random data in the columns
>>> columns=("Simulated Data", "Observed Data"),
>>> index=julian_dates)
>>> test_df
Simulated Data Observed Data
2.444240e+06 0.764719 0.126610
2.444240e+06 0.372736 0.141392
2.444240e+06 0.008645 0.686477
2.444240e+06 0.656825 0.480444
2.444240e+06 0.555247 0.869409
2.444240e+06 0.643896 0.549590
2.444240e+06 0.242720 0.799617
2.444240e+06 0.432421 0.185760
2.444240e+06 0.694631 0.136986
2.444240e+06 0.700422 0.390415
>>> # Making a new df with gregorian index
>>> test_df_gregorian = hd.julian_to_gregorian(test_df)
>>> test_df_gregorian
Simulated Data Observed Data
1980-01-01 00:00:00.000000 0.585454 0.457238
1980-01-01 01:00:00.028800 0.524764 0.083464
1980-01-01 01:59:59.971200 0.516821 0.416683
1980-01-01 03:00:00.000000 0.948483 0.553874
1980-01-01 04:00:00.028800 0.492280 0.232901
1980-01-01 04:59:59.971200 0.527967 0.296395
1980-01-01 06:00:00.000000 0.650018 0.212802
1980-01-01 07:00:00.028800 0.585592 0.802971
1980-01-01 07:59:59.971200 0.448243 0.665814
1980-01-01 09:00:00.000000 0.137395 0.201721
>>> # Rounding can be applied due to floating point inaccuracy
>>> test_df_gregorian_rounded = julian_to_gregorian(test_df, frequency="H") # Hourly Rounding Frequency
>>> test_df_gregorian_rounded
Simulated Data Observed Data
1980-01-01 00:00:00 0.309527 0.938991
1980-01-01 01:00:00 0.872284 0.497708
1980-01-01 02:00:00 0.168046 0.225845
1980-01-01 03:00:00 0.954494 0.275607
1980-01-01 04:00:00 0.875885 0.194380
1980-01-01 05:00:00 0.236849 0.992770
1980-01-01 06:00:00 0.639346 0.029808
1980-01-01 07:00:00 0.855828 0.903927
1980-01-01 08:00:00 0.638805 0.916124
1980-01-01 09:00:00 0.273430 0.443980
>>> # The DataFrame can also be modified in place, increasing efficiency with large time series
>>> julian_to_gregorian(test_df, inplace=True, frequency="H")
>>> test_df
Simulated Data Observed Data
1980-01-01 00:00:00 0.309527 0.938991
1980-01-01 01:00:00 0.872284 0.497708
1980-01-01 02:00:00 0.168046 0.225845
1980-01-01 03:00:00 0.954494 0.275607
1980-01-01 04:00:00 0.875885 0.194380
1980-01-01 05:00:00 0.236849 0.992770
1980-01-01 06:00:00 0.639346 0.029808
1980-01-01 07:00:00 0.855828 0.903927
1980-01-01 08:00:00 0.638805 0.916124
1980-01-01 09:00:00 0.273430 0.443980
"""
if inplace:
dataframe.index = pd.to_datetime(dataframe.index, origin="julian", unit="D")
if frequency is not None:
dataframe.index = dataframe.index.round(frequency)
else:
# Copying to avoid modifying the original dataframe
return_df = dataframe.copy()
# Converting the dataframe index from julian to gregorian
return_df.index = pd.to_datetime(return_df.index, origin="julian", unit="D")
if frequency is not None:
return_df.index = return_df.index.round(frequency)
return return_df
def merge_data(sim_fpath=None, obs_fpath=None, sim_df=None, obs_df=None, interpolate=None,
column_names=('Simulated', 'Observed'), simulated_tz=None, observed_tz=None, interp_type='pchip',
return_tz="Etc/UTC", julian=False, julian_freq=None):
"""Merges two dataframes or csv files, depending on the input.
Parameters
----------
sim_fpath: str
The filepath to the simulated csv of data. Can be a url if the page is formatted correctly.
The csv must be formatted with the dates in the left column and the data in the right
column.
obs_fpath: str
The filepath to the observed csv. Can be a url if the page is formatted correctly.
The csv must be formatted with the dates in the left column and the data in the right
column.
sim_df: DataFrame
A pandas DataFrame containing the simulated data. Must be formatted with a datetime index
and the simulated data values in column 0.
obs_df: DataFrame
A pandas DataFrame containing the observed data. Must be formatted with a datetime index
and the observed data values in column 0.
interpolate: str
Must be either 'observed' or 'simulated'. Specifies which data set you would like to
interpolate if interpolation is needed to properly merge the data.
column_names: tuple of str
Tuple of length two containing the column names that the user would like to set for the
DataFrame that is returned. Note that the simulated data will be in the left column and the
observed data will be in the right column
simulated_tz: str
The timezone of the simulated data. A full list of timezones can be found in the
:ref:`timezones`.
observed_tz: str
The timezone of the observed data. A full list of timezones can be found in the
:ref:`timezones`.
interp_type: str
Which interpolation method to use. Uses the default pandas interpolator.
Available types are found at
http://pandas.pydata.org/pandas-docs/version/0.16.2/generated/pandas.DataFrame.interpolate.html
return_tz: str
What timezone the merged dataframe's index should be returned as. Default is 'Etc/UTC', which is recommended
for simplicity.
julian: bool
If True, will parse the first column of the file to a datetime index from julian floating point time
representation; this is only valid when supplying the sim_fpath and obs_fpath parameters. Users supplying two
DataFrame objects must convert the index from Julian to Gregorian using the julian_to_gregorian function in this
module.
julian_freq: str
A string representing the frequency of the julian dates so that they can be rounded. See examples for usage.
Notes
-----
The only acceptable time frequencies in the data are 15min, 30min, 45min, and any number of hours or
days in between.
There are three scenarios to consider when merging your data:
1. The first scenario is that the timezones and the spacing of the time series matches
(eg. 1 Day). In this case, you will want to leave the simulated_tz, observed_tz, and
interpolate arguments empty, and the function will simply join the two csv's into a dataframe.
2. The second scenario is that you have two time series with matching time zones but not
matching spacing. In this case you will want to leave the simulated_tz and observed_tz empty,
and use the interpolate argument to tell the function which time series you would like to
interpolate to match the other time series.
3. The third scenario is that you have two time series with different time zones and possibly
different spacings. In this case you will want to fill in the simulated_tz, observed_tz, and
interpolate arguments. This will then take timezones into account when interpolating
the selected time series.
Examples
--------
>>> import hydrostats.data as hd
>>> import pandas as pd
>>> pd.options.display.max_rows = 15
The data URLs contain streamflow data from two different models, and are provided from the Hydrostats Github page
>>> sfpt_url = r'https://github.com/waderoberts123/Hydrostats/raw/master/Sample_data/sfpt_data/magdalena-calamar_interim_data.csv'
>>> glofas_url = r'https://github.com/waderoberts123/Hydrostats/raw/master/Sample_data/GLOFAS_Data/magdalena-calamar_ECMWF_data.csv'
>>> merged_df = hd.merge_data(sfpt_url, glofas_url, column_names=('Streamflow Prediction Tool', 'GLOFAS'))
"""
# Reading the data into dataframes if from file
if sim_fpath is not None and obs_fpath is not None:
# Importing data into a data-frame
sim_df_copy = pd.read_csv(sim_fpath, delimiter=",", header=None, names=[column_names[0]],
index_col=0, infer_datetime_format=True, skiprows=1)
obs_df_copy = pd.read_csv(obs_fpath, delimiter=",", header=None, names=[column_names[1]],
index_col=0, infer_datetime_format=True, skiprows=1)
# Converting the index to datetime type
if julian:
julian_to_gregorian(sim_df_copy, frequency=julian_freq, inplace=True)
julian_to_gregorian(obs_df_copy, frequency=julian_freq, inplace=True)
else:
sim_df_copy.index =
|
pd.to_datetime(sim_df_copy.index, infer_datetime_format=True, errors='coerce')
|
pandas.to_datetime
|
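# --- Added example (not part of the original snippet) ---
# A generic pandas sketch of scenario 2 from the merge_data() notes (matching time zones,
# mismatched spacing): reindex the coarser series onto the finer index, interpolate, then join.
# Illustrative only; hydrostats' merge_data handles this (plus time zones and julian parsing)
# internally and defaults to 'pchip' interpolation rather than 'time'.
import numpy as np
import pandas as pd

sim = pd.Series(np.random.rand(24), name="Simulated",
                index=pd.date_range("2020-01-01", periods=24, freq="H"))
obs = pd.Series(np.random.rand(8), name="Observed",
                index=pd.date_range("2020-01-01", periods=8, freq="3H"))

obs_hourly = obs.reindex(sim.index).interpolate(method="time")
merged_df = pd.concat([sim, obs_hourly], axis=1)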
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
return_value = s.replace([1, 2, 3], inplace=True)
assert return_value is None
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
msg = (
r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
r"\(bfill\)\. Got crash_cymbal"
)
with pytest.raises(ValueError, match=msg):
return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
assert return_value is None
tm.assert_series_equal(s, ser)
def test_replace_mixed_types(self):
ser = pd.Series(np.arange(5), dtype="int64")
def check_replace(to_rep, val, expected):
sc = ser.copy()
result = ser.replace(to_rep, val)
return_value = sc.replace(to_rep, val, inplace=True)
assert return_value is None
tm.assert_series_equal(expected, result)
tm.assert_series_equal(expected, sc)
# 3.0 can still be held in our int64 series, so we do not upcast GH#44940
tr, v = [3], [3.0]
check_replace(tr, v, ser)
# Note this matches what we get with the scalars 3 and 3.0
check_replace(tr[0], v[0], ser)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, "a"])
tr, v = [3, 4], [3.5, "a"]
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")])
tr, v = [3, 4], [3.5, pd.Timestamp("20130101")]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, True], dtype="object")
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace("fun", "in-the-sun")
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, "2u")
expected = pd.Series(["2u", False, "2u"])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
result = s.replace({"asdf": "asdb", True: "yes"})
expected = pd.Series(["yes", False, "yes"])
tm.assert_series_equal(result, expected)
def test_replace_Int_with_na(self, any_int_ea_dtype):
# GH 38267
result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA)
expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
tm.assert_series_equal(result, expected)
result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA)
result.replace(1, pd.NA, inplace=True)
tm.assert_series_equal(result, expected)
def test_replace2(self):
N = 100
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_with_dictlike_and_string_dtype(self, nullable_string_dtype):
# GH 32621, GH#44940
ser = pd.Series(["one", "two", np.nan], dtype=nullable_string_dtype)
expected = pd.Series(["1", "2", np.nan], dtype=nullable_string_dtype)
result = ser.replace({"one": "1", "two": "2"})
tm.assert_series_equal(expected, result)
def test_replace_with_empty_dictlike(self):
# GH 15289
s = pd.Series(list("abcd"))
tm.assert_series_equal(s, s.replace({}))
with tm.assert_produces_warning(FutureWarning):
empty_series = pd.Series([])
tm.assert_series_equal(s, s.replace(empty_series))
def test_replace_string_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_replacer_equals_replacement(self):
# GH 20656
# make sure all replacers are matching against original values
s = pd.Series(["a", "b"])
expected = pd.Series(["b", "a"])
result = s.replace({"a": "b", "b": "a"})
tm.assert_series_equal(expected, result)
def test_replace_unicode_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_mixed_types_with_string(self):
# Testing mixed
s = pd.Series([1, 2, 3, "4", 4, 5])
result = s.replace([2, "4"], np.nan)
expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize(
"categorical, numeric",
[
(pd.Categorical(["A"], categories=["A", "B"]), [1]),
(pd.Categorical(["A", "B"], categories=["A", "B"]), [1, 2]),
],
)
def test_replace_categorical(self, categorical, numeric):
# GH 24971, GH#23305
ser = pd.Series(categorical)
result = ser.replace({"A": 1, "B": 2})
expected = pd.Series(numeric).astype("category")
if 2 not in expected.cat.categories:
# i.e. categories should be [1, 2] even if there are no "B"s present
# GH#44940
expected = expected.cat.add_categories(2)
tm.assert_series_equal(expected, result)
def test_replace_categorical_single(self):
# GH 26988
dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
s = pd.Series(dti)
c = s.astype("category")
expected = c.copy()
expected = expected.cat.add_categories("foo")
expected[2] = "foo"
expected = expected.cat.remove_unused_categories()
assert c[2] != "foo"
result = c.replace(c[2], "foo")
tm.assert_series_equal(expected, result)
assert c[2] != "foo" # ensure non-inplace call does not alter original
return_value = c.replace(c[2], "foo", inplace=True)
assert return_value is None
tm.assert_series_equal(expected, c)
first_value = c[0]
return_value = c.replace(c[1], c[0], inplace=True)
assert return_value is None
assert c[0] == c[1] == first_value # test replacing with existing value
def test_replace_with_no_overflowerror(self):
# GH 25616
# casts to object without Exception from OverflowError
s = pd.Series([0, 1, 2, 3, 4])
result = s.replace([3], ["100000000000000000000"])
expected = pd.Series([0, 1, 2, "100000000000000000000", 4])
tm.assert_series_equal(result, expected)
s = pd.Series([0, "100000000000000000000", "100000000000000000001"])
result = s.replace(["100000000000000000000"], [1])
expected = pd.Series([0, 1, "100000000000000000001"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser, to_replace, exp",
[
([1, 2, 3], {1: 2, 2: 3, 3: 4}, [2, 3, 4]),
(["1", "2", "3"], {"1": "2", "2": "3", "3": "4"}, ["2", "3", "4"]),
],
)
def test_replace_commutative(self, ser, to_replace, exp):
# GH 16051
# DataFrame.replace() overwrites when values are non-numeric
series = pd.Series(ser)
expected = pd.Series(exp)
result = series.replace(to_replace)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser, exp", [([1, 2, 3], [1, True, 3]), (["x", 2, 3], ["x", True, 3])]
)
def test_replace_no_cast(self, ser, exp):
# GH 9113
# BUG: replace int64 dtype with bool coerces to int64
series = pd.Series(ser)
result = series.replace(2, True)
expected = pd.Series(exp)
tm.assert_series_equal(result, expected)
def test_replace_invalid_to_replace(self):
# GH 18634
# API: replace() should raise an exception if invalid argument is given
series = pd.Series(["a", "b", "c "])
msg = (
r"Expecting 'to_replace' to be either a scalar, array-like, "
r"dict or None, got invalid type.*"
)
with pytest.raises(TypeError, match=msg):
series.replace(lambda x: x.strip())
@pytest.mark.parametrize("frame", [False, True])
def test_replace_nonbool_regex(self, frame):
obj = pd.Series(["a", "b", "c "])
if frame:
obj = obj.to_frame()
msg = "'to_replace' must be 'None' if 'regex' is not a bool"
with pytest.raises(ValueError, match=msg):
obj.replace(to_replace=["a"], regex="foo")
@pytest.mark.parametrize("frame", [False, True])
def test_replace_empty_copy(self, frame):
obj = pd.Series([], dtype=np.float64)
if frame:
obj = obj.to_frame()
res = obj.replace(4, 5, inplace=True)
assert res is None
res = obj.replace(4, 5, inplace=False)
tm.assert_equal(res, obj)
assert res is not obj
def test_replace_only_one_dictlike_arg(self, fixed_now_ts):
# GH#33340
ser = pd.Series([1, 2, "A", fixed_now_ts, True])
to_replace = {0: 1, 2: "A"}
value = "foo"
msg = "Series.replace cannot use dict-like to_replace and non-None value"
with pytest.raises(ValueError, match=msg):
ser.replace(to_replace, value)
to_replace = 1
value = {0: "foo", 2: "bar"}
msg = "Series.replace cannot use dict-value and non-None to_replace"
with pytest.raises(ValueError, match=msg):
ser.replace(to_replace, value)
def test_replace_extension_other(self, frame_or_series):
# https://github.com/pandas-dev/pandas/issues/34530
obj = frame_or_series(pd.array([1, 2, 3], dtype="Int64"))
result = obj.replace("", "") # no exception
# should not have changed dtype
tm.assert_equal(obj, result)
def _check_replace_with_method(self, ser: pd.Series):
df = ser.to_frame()
res = ser.replace(ser[1], method="pad")
expected = pd.Series([ser[0], ser[0]] + list(ser[2:]), dtype=ser.dtype)
tm.assert_series_equal(res, expected)
res_df = df.replace(ser[1], method="pad")
tm.assert_frame_equal(res_df, expected.to_frame())
ser2 = ser.copy()
res2 = ser2.replace(ser[1], method="pad", inplace=True)
assert res2 is None
tm.assert_series_equal(ser2, expected)
res_df2 = df.replace(ser[1], method="pad", inplace=True)
assert res_df2 is None
tm.assert_frame_equal(df, expected.to_frame())
def test_replace_ea_dtype_with_method(self, any_numeric_ea_dtype):
arr = pd.array([1, 2, pd.NA, 4], dtype=any_numeric_ea_dtype)
ser = pd.Series(arr)
self._check_replace_with_method(ser)
@pytest.mark.parametrize("as_categorical", [True, False])
def test_replace_interval_with_method(self, as_categorical):
# in particular interval that can't hold NA
idx = pd.IntervalIndex.from_breaks(range(4))
ser = pd.Series(idx)
if as_categorical:
ser = ser.astype("category")
self._check_replace_with_method(ser)
@pytest.mark.parametrize("as_period", [True, False])
@pytest.mark.parametrize("as_categorical", [True, False])
def test_replace_datetimelike_with_method(self, as_period, as_categorical):
idx = pd.date_range("2016-01-01", periods=5, tz="US/Pacific")
if as_period:
idx = idx.tz_localize(None).to_period("D")
ser = pd.Series(idx)
ser.iloc[-2] = pd.NaT
if as_categorical:
ser = ser.astype("category")
self._check_replace_with_method(ser)
def test_replace_with_compiled_regex(self):
# https://github.com/pandas-dev/pandas/issues/35680
s = pd.Series(["a", "b", "c"])
regex = re.compile("^a$")
result = s.replace({regex: "z"}, regex=True)
expected = pd.Series(["z", "b", "c"])
tm.assert_series_equal(result, expected)
def test_pandas_replace_na(self):
# GH#43344
ser = pd.Series(["AA", "BB", "CC", "DD", "EE", "", pd.NA], dtype="string")
regex_mapping = {
"AA": "CC",
"BB": "CC",
"EE": "CC",
"CC": "CC-REPL",
}
result = ser.replace(regex_mapping, regex=True)
exp = pd.Series(["CC", "CC", "CC-REPL", "DD", "CC", "", pd.NA], dtype="string")
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"dtype, input_data, to_replace, expected_data",
[
("bool", [True, False], {True: False}, [False, False]),
("int64", [1, 2], {1: 10, 2: 20}, [10, 20]),
("Int64", [1, 2], {1: 10, 2: 20}, [10, 20]),
("float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]),
("Float64", [1.1, 2.2], {1.1: 10.1, 2.2: 20.5}, [10.1, 20.5]),
("string", ["one", "two"], {"one": "1", "two": "2"}, ["1", "2"]),
(
pd.IntervalDtype("int64"),
IntervalArray([pd.Interval(1, 2), pd.Interval(2, 3)]),
{pd.Interval(1, 2): pd.Interval(10, 20)},
IntervalArray([pd.Interval(10, 20), pd.Interval(2, 3)]),
),
(
pd.IntervalDtype("float64"),
IntervalArray([pd.Interval(1.0, 2.7), pd.Interval(2.8, 3.1)]),
{pd.Interval(1.0, 2.7): pd.Interval(10.6, 20.8)},
IntervalArray([pd.Interval(10.6, 20.8), pd.Interval(2.8, 3.1)]),
),
(
pd.PeriodDtype("M"),
[pd.Period("2020-05", freq="M")],
{pd.Period("2020-05", freq="M"): pd.Period("2020-06", freq="M")},
[pd.Period("2020-06", freq="M")],
),
],
)
def test_replace_dtype(self, dtype, input_data, to_replace, expected_data):
# GH#33484
ser = pd.Series(input_data, dtype=dtype)
result = ser.replace(to_replace)
expected = pd.Series(expected_data, dtype=dtype)
tm.assert_series_equal(result, expected)
def test_replace_string_dtype(self):
# GH#40732, GH#44940
ser = pd.Series(["one", "two", np.nan], dtype="string")
res = ser.replace({"one": "1", "two": "2"})
expected = pd.Series(["1", "2", np.nan], dtype="string")
tm.assert_series_equal(res, expected)
# GH#31644
ser2 = pd.Series(["A", np.nan], dtype="string")
res2 = ser2.replace("A", "B")
expected2 = pd.Series(["B", np.nan], dtype="string")
tm.assert_series_equal(res2, expected2)
ser3 = pd.Series(["A", "B"], dtype="string")
res3 = ser3.replace("A", pd.NA)
expected3 = pd.Series([pd.NA, "B"], dtype="string")
tm.assert_series_equal(res3, expected3)
def test_replace_string_dtype_list_to_replace(self):
# GH#41215, GH#44940
ser = pd.Series(["abc", "def"], dtype="string")
res = ser.replace(["abc", "any other string"], "xyz")
expected = pd.Series(["xyz", "def"], dtype="string")
|
tm.assert_series_equal(res, expected)
|
pandas._testing.assert_series_equal
|
import pandas as pd
import urllib.request
import traceback
from backend.common import *
DATA_PATH = f'{get_root()}/data/world.xlsx'
def consolidate_country_col(df, country_col, country_id_col, covid_df):
"""
This method adjusts the values in the country field of the passed DF
so that they match those in covid_df whenever possible,
allowing the two frames to be joined on the country field afterwards.
"""
covid_countries = covid_df[['country_id', 'country']].drop_duplicates()
covid_countries['country_lower'] = covid_countries['country'].str.lower()
covid_countries['country_id_lower'] = covid_countries['country_id'].str.lower()
df = df.rename(columns={
country_col: 'country_other',
country_id_col: 'country_id_other',
})
df['country_other_lower'] = df['country_other'].str.lower()
df['country_id_other_lower'] = df['country_id_other'].str.lower()
def _take_first_non_null_col(_df, _cols):
return _df[_cols].fillna(method='bfill', axis=1).iloc[:, 0]
def _consolidate_on(_df, col):
_join_df = covid_countries.set_index(f'{col}_lower')
_df = _df.join(_join_df, on=f'{col}_other_lower')
_df['country_other'] = _take_first_non_null_col(_df, ['country', 'country_other'])
for c in _join_df.columns:
del _df[c]
return _df
df = _consolidate_on(df, 'country_id')
df = _consolidate_on(df, 'country')
df = df[df['country_other'].isin(covid_countries['country'])]
del df['country_id_other']
del df['country_other_lower']
del df['country_id_other_lower']
df = df.rename(columns={
'country_other': 'country'
})
return df
def get_google_mobility_df(covid_df):
url = 'https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv'
df = pd.read_csv(url, nrows=1)
dtypes = {col: 'float' if col.endswith('baseline') else 'object' for col in df.columns}
df = pd.read_csv(url, dtype=dtypes)
del df['iso_3166_2_code']
del df['census_fips_code']
df = consolidate_country_col(df, 'country_region', 'country_region_code', covid_df)
df = df[pd.isna(df['sub_region_1'])]
del df['sub_region_1']
del df['sub_region_2']
to_rep = '_percent_change_from_baseline'
for col in df.columns:
if col.endswith(to_rep):
df = df.rename(columns={col: 'pc_' + col.replace(to_rep, '')})
df = df[pd.isnull(df['metro_area'])]
del df['metro_area']
return df
def get_covid_df():
# get the data
url = 'https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide.xlsx'
df = pd.read_excel(url)
# basic processing
df = df.rename(columns={
'dateRep': 'date',
'countriesAndTerritories': 'country',
'popData2019': 'population',
'geoId': 'country_id',
'continentExp': 'continent'
})
del df['countryterritoryCode']
df.loc[df['country'] == 'Namibia', 'country_id'] = 'NAMIBIA'
df.loc[df['country'] == 'Czechia', 'population'] = 10650000
df['population'] = df['population'].fillna(0)
df['country'] = df['country'].str.replace('_', ' ')
if df['date'].dtype.name == 'object':
df['date'] =
|
pd.to_datetime(df['date'], format="%d/%m/%Y")
|
pandas.to_datetime
|
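# --- Added example (not part of the original snippet) ---
# A small sketch of the row-wise "first non-null" trick used in _take_first_non_null_col():
# back-filling along axis=1 pulls the next non-null value leftwards, so column 0 ends up
# holding the first non-null value of each row.
import numpy as np
import pandas as pd

toy = pd.DataFrame({"a": [np.nan, "x", np.nan],
                    "b": ["y", np.nan, np.nan],
                    "c": ["z", "w", "v"]})
first_non_null = toy[["a", "b", "c"]].fillna(method="bfill", axis=1).iloc[:, 0]
# first_non_null -> ["y", "x", "v"]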
import shutil
import numpy as np
import math
import pandas as pd
from urllib.request import urlopen
import cv2
from skimage import exposure
import shapely
import glob
import os
from osgeo import gdal
import utm
import itertools
import geopandas as gpd
import pathlib
import matplotlib.pyplot as plt
import matplotlib._color_data as mcd
import contextily as ctx
import time
cycle = list(mcd.XKCD_COLORS.values())
import hsfm
import bare
"""
Core data wrangling and preprocessing functions.
"""
# TODO
# - break this up into separate libraries and classes to better
# accommodate other imagery and generalize upstream as much as possible.
def get_gcp_polygon(fn):
file_name = os.path.splitext(os.path.split(fn)[-1])[0]
df = pd.read_csv(fn, header=None, sep=' ')
df = df[[1,2]]
df.columns=['lat','lon']
gdf = hsfm.geospatial.df_points_to_polygon_gdf(df)
gdf['camera'] = file_name
return gdf
def create_overlap_list(gcp_directory,
image_directory,
output_directory):
output_directory = os.path.join(output_directory, 'ba')
hsfm.io.create_dir(output_directory)
filename_out = os.path.join(output_directory,'overlaplist.txt')
if os.path.exists(filename_out):
os.remove(filename_out)
gcp_files = glob.glob(os.path.join(gcp_directory,'*.gcp'))
image_files = glob.glob(os.path.join(image_directory,'*.tif'))
footprints = []
for fn in gcp_files:
gdf = get_gcp_polygon(fn)
footprints.append(gdf)
pairs=[]
for a, b in itertools.combinations(footprints, 2):
result = hsfm.geospatial.compare_footprints(a, b)
if result == 1:
c = hsfm.io.retrieve_match(a['camera'].values[0] , image_files)
d = hsfm.io.retrieve_match(b['camera'].values[0] , image_files)
pairs.append((c,d))
pairs = sorted(list(set(pairs)))
for i in pairs:
with open(filename_out, 'a') as out:
out.write(i[0] + ' '+ i[1]+'\n')
return filename_out
def create_overlap_list_from_match_files(match_files_directory,
image_directory,
output_directory,
suffix='.match'):
output_directory = os.path.join(output_directory, 'ba')
hsfm.io.create_dir(output_directory)
filename_out = os.path.join(output_directory,'overlaplist.txt')
if os.path.exists(filename_out):
os.remove(filename_out)
match_files = sorted(glob.glob(os.path.join(match_files_directory, '*' + suffix)))
match_files
pairs = []
for match_file in match_files:
img1_fn, img2_fn = bare.core.parse_image_names_from_match_file_name(match_file,
image_directory,
'tif')
pairs.append((img1_fn, img2_fn))
# creates full set from .match and clean.match pairs
pairs = sorted(list(set(pairs)))
for i in pairs:
with open(filename_out, 'a') as out:
out.write(i[0] + ' '+ i[1]+'\n')
return filename_out
def determine_flight_lines(df,
cutoff_angle = 30,
file_base_name_column = 'fileName',
longitude_column = 'Longitude',
latitude_column = 'Latitude'):
df = hsfm.batch.calculate_heading_from_metadata(df,
file_base_name_column = file_base_name_column,
longitude_column = longitude_column,
latitude_column = latitude_column)
df['next_heading'] = df['heading'].shift(-1)
df['heading_diff'] = abs(df['next_heading'] - df['heading'])
df['heading_diff'] = df['heading_diff'].fillna(0)
df = df.reset_index(drop=True)
flights_tmp = []
tmp_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn import tree
def get_one_hot_from_categories(dataframe):
data_cols = dataframe.select_dtypes('category').columns
data_cols_dropped = dataframe.drop(columns=data_cols, axis=1)
one_hot = pd.get_dummies(dataframe[data_cols])
return pd.concat([data_cols_dropped, one_hot], axis=1, join='inner')
def fill_question_marks_based_on_predicting(database, column, test_percentage):
print('Column refactored - ' + str(column))
test_data = database.all_values[(database.all_values[column].values == '?')].copy()
test_label = test_data[column]
train_data = database.all_values[(database.all_values[column].values != '?')].copy().sample(
frac=0.1)
train_label = train_data[column]
test_data.drop(columns=[column], inplace=True)
train_data.drop(columns=[column], inplace=True)
train_data = get_one_hot_from_categories(train_data)
test_data = get_one_hot_from_categories(test_data)
clf = tree.DecisionTreeClassifier()
clf = clf.fit(train_data, train_label)
clf_pred = clf.predict(test_data)
r_forest = RandomForestClassifier(n_estimators=10)
r_forest.fit(train_data, train_label)
r_forest_pred = r_forest.predict(test_data)
majority_class = database.all_values[column].value_counts().index[0]
pred_df =
|
pd.DataFrame({'RFor': r_forest_pred, 'DTree': clf_pred})
|
pandas.DataFrame
|
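# --- Added example (not part of the original snippet) ---
# A minimal usage sketch of get_one_hot_from_categories() on hypothetical data: columns of
# dtype 'category' are replaced by one-hot indicator columns, everything else is kept as-is.
import pandas as pd

toy = pd.DataFrame({
    "age": [25, 32, 47],
    "workclass": pd.Categorical(["Private", "State-gov", "Private"]),
})
encoded = get_one_hot_from_categories(toy)
# encoded columns: age, workclass_Private, workclass_State-gov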
#import multi_operation_planning
#from multi_operation_planning.solar_irradiance import aoi, get_total_irradiance
#from multi_operation_planning.solar_position import get_solarposition
import csv
from csv import writer, reader
import pandas as pd
import datetime
import os
import sys
import DES_weather_analysis
from DES_weather_analysis import solar_irradiance
from DES_weather_analysis.solar_irradiance import aoi, get_total_irradiance
from DES_weather_analysis.solar_position import get_solarposition
from pvlib import atmosphere, solarposition, tools
class GTI_class:
def __init__(self,year,path_test,weather_path,TMYs=None,AMYs=None):
self.TMYs = TMYs
self.AMYs = AMYs
editable_data_path =os.path.join(path_test, 'editable_values.csv')
editable_data = pd.read_csv(editable_data_path, header=None, index_col=0, squeeze=True).to_dict()[1]
self.weather_path = weather_path
self.lat = float(editable_data['Latitude'])
self.lon = float(editable_data['Longitude'])
self.altitude = float(editable_data['Altitude']) #SLC altitude m
self.surf_tilt = float(editable_data['solar_tilt']) #panels tilt degree
self.surf_azimuth = float(editable_data['solar_azimuth']) #panels azimuth degree
self.year = year
if AMYs is None:
self.weather_data = pd.read_csv(self.weather_path).reset_index().drop('index', axis = 1)
else:
self.weather_data = pd.read_csv(self.weather_path)
self.weather_data = self.weather_data.rename(columns=self.weather_data.iloc[1]).drop([0,1], axis = 0).reset_index()
def process_gti(self):
if self.AMYs is None:
if self.TMYs is None:
DNI= self.weather_data['dni']
DHI = self.weather_data['dhi']
GHI = self.weather_data['ghi']
dti = pd.date_range(str(self.year)+'-01-01', periods=len(GHI), freq='H')
else:
DNI= self.weather_data['dni']
DHI = self.weather_data['dhi']
GHI = self.weather_data['ghi']
df = pd.DataFrame({'year': self.weather_data['year'],
'month': self.weather_data['month'],
'day': self.weather_data['day'],
'hour': self.weather_data['hour']-1})
else:
DNI= self.weather_data['DNI']
DHI = self.weather_data['DHI']
GHI = self.weather_data['GHI']
df = pd.DataFrame({'year': pd.to_numeric(self.weather_data['Year']),
'month':
|
pd.to_numeric(self.weather_data['Month'])
|
pandas.to_numeric
|
import numpy as np
import pandas as pd
import itertools
import lidg.statistics as st
def q_matrix(order):
n = len(order)
q_mat = np.zeros((n,n))
for i in range(n):
q_mat[order[i],i] = 1.
return q_mat
def get_tolerances(tole1,tole2,m):
ave_val = 1. / np.sqrt(m)
print(f"1/sqrt(m) = {ave_val}")
if tole1 is None:
tole1 = ave_val / 100.
print("tole1 is given automatically.")
if tole2 is None:
tole2 = tole1 / 100.
print("tole2 is given automatically.")
print(f"tole1, tole2 = {tole1}, {tole2}\n")
return tole1,tole2
def model_form(y_name,x_name_lis,coe_lis,n_round=3):
n = len(x_name_lis)
form = str(y_name)+" = "
zero = 1.*10**-(n_round+2)
for i in range(n):
if(np.abs(coe_lis[i]) >= zero):
coe = np.round(coe_lis[i],n_round)
form = form + "(" + str(coe) + ")" + str(x_name_lis[i]) + " + "
return form[:-3]
def insertion_sort_2val(s,p1,p2):
# assuming that p1 is more important than p2.
m = len(s)
for i in range(1,m):
if s[i][p1] < s[i-1][p1]:
for j in range(i):
if s[i][p1] < s[j][p1]:
s.insert(j,s.pop(i))
break
elif s[i][p1] == s[j][p1]:
for k in range(j,i):
if s[i][p1] == s[k][p1] and s[i][p2] <= s[k][p2]:
s.insert(k,s.pop(i))
break
return s
def find_subspace(labs,c_mat_t,new_order,rk):
zero = 0.00001
ns = c_mat_t.shape[0]
f_list = [] # solution form list
for i in range(ns):
y_lab = labs[new_order[rk+i]]
sol = c_mat_t[i,:rk]
lab_list = []
for j in range(rk):
x_lab = labs[new_order[j]]
if np.abs(sol[j]) >= zero:
lab_list.append(x_lab)
lab_list = [y_lab,lab_list,1,len(lab_list)]
f_list.append(lab_list)
ss_list = []
for i in range(ns):
if f_list[i][-2] == 1:
for j in range(i+1,ns):
if f_list[i][-3] == f_list[j][-3]:
f_list[i].insert(-3,f_list[j][0])
f_list[i][-2] += 1
f_list[j][-2] = 0
ss_list.append(f_list[i])
ss_list = insertion_sort_2val(ss_list,-1,-2)
nssl = len(ss_list)
print(f"Subspace list: {nssl}")
for i,ssl in enumerate(ss_list):
print(f"{i+1} {ssl[:-2]}")
print("")
def rref(x_in,tole1,tole2):
x = x_in.copy()
m,n = x.shape
order_list = [i for i in range(n)] # the order of columns
for i in range(min(m,n)):
ind_i_max = np.argmax(np.abs(x[i:,i])) + i
x_i_max = x[ind_i_max,i]
pivot = np.abs(x_i_max)
# for complete pivoting
if pivot <= tole1:
max_col_ar = np.max(np.abs(x[i:,i:]),axis=0)
ind_col_max = np.argmax(np.abs(max_col_ar)) + i
sx_max = np.max(np.abs(max_col_ar)) # maximum value in small matrix x[i:,i:]
pivot = np.abs(sx_max)
if pivot <= tole2:
print(f"X is a rank deficient matrix ( pivot ( = {pivot}) < tole2 ( = {tole2}) )")
return x,order_list
else:
ind_row_max = np.argmax(np.abs(x[i:,ind_col_max])) + i
x_row_max = x[ind_row_max,ind_col_max]
order_list.insert(i,order_list.pop(ind_col_max)) # pop & insert rather than a swap, i.e. the column order is shifted.
x_col_max = x[:,ind_col_max]
x = np.delete(x,ind_col_max,axis=1)
x = np.insert(x,i,x_col_max,axis=1)
ind_i_max = ind_row_max
x_i_max = x_row_max
tmp = x[ind_i_max].copy()
x[ind_i_max] = x[i]
x[i] = tmp
x[i,i:] = x[i,i:] / x_i_max
for j in range(m):
if not j == i:
xji = x[j,i]
x[j,:] = x[j,:] - xji * x[i,:]
print(f"X is a full rank matrix ( pivot ( = {pivot}) > tole2 ( = {tole2}) )")
return x,order_list
def find_ints(x,tole1,tole2):
# Find independent non-trivial solutions
m,n = x.shape
rk_np = np.linalg.matrix_rank(x) # for rank checking
tole1,tole2 = get_tolerances(tole1,tole2,m)
x_rref,new_order = rref(x,tole1,tole2)
rk_rref = 0
for i in range(min(m,n)):
if x_rref[i,i] == 1.:
rk_rref += 1
print(f"Rank check: rk_np = {rk_np}")
print(f" rk_rref = {rk_rref}")
ns = n - rk_rref
print(f"# of non-trivial solutions: {ns} (= n - rk_rref)")
print(f"Column order:\n {new_order}")
c_up = -x_rref[:rk_rref,rk_rref:]
c_eye = np.eye(ns,ns)
c_mat = np.r_[c_up,c_eye] # solution matrix
return c_mat,new_order,rk_rref
def find_multicollinearity(x_df,normalize,tole1,tole2):
labs = x_df.columns.tolist()
x = x_df.values.copy()
x = np.array(x,dtype="float64")
x_norm,norm = st.normalize(x) # Descriptors are always normalized once here to improve numerical accuracy
m,n = x.shape
print(f"Shape: X ({m}, {n})")
c_mat,new_order,rk = find_ints(x_norm,tole1,tole2)
base_list = new_order[:rk]
extr_list = new_order[rk:]
spac_list = extr_list.copy()
spac_list.append(base_list)
print("Space index: [ extra basis, [ basis ] ]")
print(spac_list)
# X' = XD : X' (m,n) is a normalized descriptor matrix.
# D (n,n) is a normalized operator.
# X'_rref = RX'Q : X'_rref (m,n) is a matrix with reduced row echelon form (rref).
# R (m,m) is a elementary row operation.
# Q (n,n) is an elementary column operation (assumed to be an orthogonal matrix).
# X'_rrefC_rref = 0 : C_rref is a solution matrix.
# (R^-1)X'_rref(Q^-1)QC_rref = 0, then, X'QC_rref = 0,
# the solution of X'C' = 0 is given by QC_rref.
# the solution of XC = 0 is given by DQC_rref.
# In this program, the solutions for XQ or X'Q is calculated (instead of X or X').
# Therefore, C_rref (for normalized coef.) or (Q^-1)DQC_rref (for original coef.) is obtained as shown below.
if normalize: # for normalized coefficients
dc_mat_t = c_mat.T # C_rref^T : solution vectors for X'Q.
else: # for original coefficients
q_mat = q_matrix(new_order) # Q
q_mat_inv = q_mat.T # Q^-1 (Q^T because Q is orthogonal)
qc_mat = np.dot(q_mat,c_mat) # QC_rref
d_mat = np.linalg.inv(np.diag(norm)) # D
dqc_mat = np.dot(d_mat,qc_mat) # DQC_rref
dc_mat = np.dot(q_mat_inv,dqc_mat) # (Q^-1)DQC_rref
dc_mat_t = dc_mat.T # ( (Q^-1)DQC_rref)^T : solution vectors for XQ.
ns = n - rk
# Print the correlation form
print("\nThe form of multi-correlated descriptors")
for i in range(ns):
y_lab = labs[new_order[rk+i]]
x_labs = []
for j in range(rk):
x_lab = labs[new_order[j]] # X'Q
x_labs.append(x_lab)
form = model_form(y_lab,x_labs,-dc_mat_t[i]/dc_mat_t[i,rk+i])
print(f"{i+1} : {form}")
print("")
# Make subspace list
find_subspace(labs,c_mat.T,new_order,rk)
lid_list = []
for bi in base_list:
blab = labs[bi]
lid_list.append(blab)
print(f"Temporal linearly independent descriptors (LIDs): {rk}")
print(f"{lid_list}\n")
return lid_list
# === Hypervolume calculation ===
def hypervolume(x):
x_norm,norm = st.normalize(x)
vol = np.sqrt(np.linalg.det(np.dot(x_norm.T,x_norm)))
return vol
def make_sub_df(df,sub_list):
sub_df = pd.DataFrame(index=df.index)
for sub in sub_list:
sub_df[sub] = df[sub]
return sub_df
def make_comb_list(can_list,k):
comb_list = list(itertools.combinations(can_list,k))
sub_list = []
com_list = []
for tup in comb_list:
s_list = list(tup)
c_list = []
c_list.extend(can_list)
for t in s_list:
c_list.remove(t)
sub_list.append(s_list)
com_list.append(c_list)
return sub_list,com_list
def hypervolume_comb(df,defined_list,candidate_list,k,ntop):
if ntop is None:
m = len(candidate_list)
ntop = int(st.mCn(m,k))
def_df = make_sub_df(df,defined_list)
can_df = make_sub_df(df,candidate_list)
use_list,unuse_list = make_comb_list(candidate_list,k)
ncomb = len(use_list)
vol_list = []
for i in range(ncomb):
use_df = make_sub_df(df,use_list[i])
base_df = pd.concat([def_df,use_df],axis=1)
base = base_df.values.copy()
vol = hypervolume(base)
vol_list.append([use_list[i],unuse_list[i],vol])
vol_df =
|
pd.DataFrame(vol_list,columns=["Used","Unused","Volume"])
|
pandas.DataFrame
|
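# --- Added example (not part of the original snippet) ---
# A generic numpy illustration (not lidg's own RREF routine) of what find_multicollinearity()
# recovers: for X whose third column is x1 + x2, a non-trivial solution of X c = 0 exposes
# the dependency. Here the null-space vector comes from an SVD instead of the RREF above.
import numpy as np

rng = np.random.default_rng(0)
x1, x2 = rng.random(20), rng.random(20)
X = np.column_stack([x1, x2, x1 + x2])

_, s, vt = np.linalg.svd(X, full_matrices=False)
null_vec = vt[-1]                                  # right singular vector of the ~0 singular value
print(np.round(s, 6))                              # last singular value ~ 0 -> rank deficiency
print(np.round(null_vec / null_vec[0], 3))         # ~ [1, 1, -1], i.e. x1 + x2 - x3 = 0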
# -*- coding: utf-8 -*-
import sys, os
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src')
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src/ct')
import pandas as pd
import ntpath
import datetime
from openpyxl.worksheet.datavalidation import DataValidation
from openpyxl.formatting.formatting import ConditionalFormattingList
from openpyxl.styles import Font, Color, Border, Side
from openpyxl.styles import Protection
from openpyxl.styles import PatternFill
from glob import glob
from shutil import copyfile
import numpy as np
from collections import defaultdict
from openpyxl.utils import get_column_letter
from CTDataStruct import CTPatient
import keyboard
from openpyxl.styles.differential import DifferentialStyle
from openpyxl.formatting import Rule
from settings import initSettings, saveSettings, loadSettings, fillSettingsTags
from classification import createRFClassification, initRFClassification, classifieRFClassification
from filterTenStepsGuide import filter_CACS_10StepsGuide, filter_CACS, filter_NCS, filterReconstruction, filter_CTA, filer10StepsGuide, filterReconstructionRF
from CTDataStruct import CTPatient, CTImage, CTRef
import SimpleITK as sitk
import matplotlib.pyplot as plt
from glob import glob
def splitFilePath(filepath):
""" Split filepath into folderpath, filename and file extension
:param filepath: Filepath
:type filepath: str
"""
folderpath, _ = ntpath.split(filepath)
head, file_extension = os.path.splitext(filepath)
folderpath, filename = ntpath.split(head)
return folderpath, filename, file_extension
def checkRefereencesAL():
fp = 'H:/cloud/cloud_data/Projects/DISCHARGEMaster/datasets/CACS_20210801_XA/References/'
files = glob(fp + '*-label.nrrd')
filenameList=[]
ratioList=[]
for i,fip in enumerate(files):
print(i)
_, filename, _ = splitFilePath(fip)
ref = CTRef()
ref.load(fip)
N=ref.ref().shape[0]*ref.ref().shape[1]*ref.ref().shape[2]
Nr=(ref.ref()!=0).sum()
ratio = Nr / N
filenameList.append(filename)
ratioList.append(ratio)
def createCACS(settings, name, folderpath, createPreview, createDatasetFromPreview, NumSamples=None):
# Create dataset folder
folderpath_data = os.path.join(folderpath, name)
os.makedirs(folderpath_data, exist_ok=True)
folderpath_preview = os.path.join(folderpath_data, 'preview')
os.makedirs(folderpath_preview, exist_ok=True)
folderpath_dataset = os.path.join(folderpath_data, 'Images')
os.makedirs(folderpath_dataset, exist_ok=True)
filepath_preview = os.path.join(folderpath_preview, 'preview.xlsx')
filepath_preview_refine = os.path.join(folderpath_preview, 'preview_refine.xlsx')
filepath_dataset = os.path.join(folderpath_dataset, name +'.xlsx')
cols = ['ID_CACS', 'PatientID', 'StudyInstanceUID', 'SeriesInstanceUID', 'SeriesNumber', 'Count', 'NumberOfFrames',
'KHK', 'RECO', 'SliceThickness', 'ReconstructionDiameter', 'ConvolutionKernel', 'CACSSelection',
'StudyDate', 'ITT', 'Comment', 'KVP']
cols_master = ['PatientID', 'StudyInstanceUID', 'SeriesInstanceUID', 'SeriesNumber', 'Count', 'NumberOfFrames',
'RECO', 'SliceThickness', 'ReconstructionDiameter', 'ConvolutionKernel', 'StudyDate', 'ITT', 'Comment']
cols_first = ['ID_CACS','CACSSelection', 'PatientID', 'SeriesNumber', 'StudyInstanceUID', 'SeriesInstanceUID']
if createPreview:
# Read master
df_master = pd.read_excel(settings['filepath_master_preview'], sheet_name='MASTER_01092020')
df_preview = pd.DataFrame(columns=cols)
df_preview[cols_master] = df_master[cols_master]
df_preview['KHK'] = 'UNDEFINED'
df_preview['KVP'] = 'UNDEFINED'
df_preview['CACSSelection'] = (df_master['RFCLabel']=='CACS')*1
# Create preview excel
df_preview.reset_index(drop=True, inplace=True)
constrain = list(df_master['RFCLabel']=='CACS')
k=0
ID_CACS = [-1 for i in range(len(constrain))]
for i in range(len(constrain)):
if constrain[i]==True:
ID_CACS[i]="{:04n}".format(k)
k = k + 1
df_preview['ID_CACS'] = ID_CACS
cols_new = cols_first + [x for x in cols if x not in cols_first]
df_preview = df_preview[cols_new]
df_preview.reset_index(inplace=True, drop=True)
df_preview.to_excel(filepath_preview)
# Create preview mhd
filepath_preview_mhd = os.path.join(folderpath_preview, 'preview.mhd')
k_max = k-1
image_preview = np.zeros((k_max,512,512), np.int16)
if NumSamples is None:
NumSamples = len(df_preview)
for index, row in df_preview[0:NumSamples].iterrows():
if int(row['ID_CACS'])>-1:
try:
                    if index % 100 == 0:
                        print('Index:', index)
patient = CTPatient(row['StudyInstanceUID'], row['PatientID'])
series = patient.loadSeries(settings['folderpath_discharge'], row['SeriesInstanceUID'], None)
image = series.image.image()
if image.shape[1]==512:
SliceNum = int(np.round(image.shape[0]*0.7))
image_preview[int(row['ID_CACS']),:,:] = image[SliceNum,:,:]
else:
print('Image size is not 512x512')
print('SeriesInstanceUID', row['SeriesInstanceUID'])
except:
                    print('Could not open image:', row['SeriesInstanceUID'])
image_preview_mhd = CTImage()
image_preview_mhd.setImage(image_preview)
image_preview_mhd.save(filepath_preview_mhd)
# Create dataset
if createDatasetFromPreview:
        df_cacs = pd.read_excel(filepath_preview_refine)
from typing import Dict, List, Tuple, Union
import geopandas
import numpy as np
import pandas as pd
from .matches import iter_matches
from .static import ADMINISTRATIVE_DIVISIONS, POSTCODE_MUNICIPALITY_LOOKUP
from .static import df as STATIC_DF
INDEX_COLS = ["municipality", "postcode", "street_nominative", "house_nr"]
def is_valid_idx(x: Tuple[str, str, str, str]):
if not x[0] and not x[1] and not x[2]:
return False
return True
def merge_tuples(
sq: Tuple[
Union[str, slice], Union[str, slice], Union[str, slice], Union[str, slice]
],
res: pd.MultiIndex,
) -> Tuple[str, str, str, str]:
"""Replace tuple values where the index is an empty slice.
Behaviour change in pandas 1.4, in previous versions the full index was returned.
Post 1.4, pandas returns only the missing levels.
:param sq: query tuple
:type sq: Tuple[ Union[str, slice], Union[str, slice], Union[str, slice], Union[str, slice] ]
:param res: index part
:type res: Tuple
:return: Full lookup value
:rtype: Tuple[str, str, str, str]
"""
out = list(sq)
for n in res.names:
idx = INDEX_COLS.index(n)
out[idx] = res.get_level_values(n)[0]
return tuple(out)
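# A minimal sketch (hypothetical data) of what merge_tuples compensates for: the
# query left the postcode empty (replaced by a slice), the pandas >= 1.4 result
# index only carries that sliced level, and the function stitches the full tuple
# back together. The helper is illustrative only and is not called anywhere.
def _merge_tuples_example() -> Tuple[str, str, str, str]:
    sq = ("Reykjavík", slice(None), "Laugavegur", "10")
    res = pd.MultiIndex.from_tuples([("101",)], names=["postcode"])
    return merge_tuples(sq, res)  # -> ("Reykjavík", "101", "Laugavegur", "10")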
def _build_municipality_street_to_postcode(
df: pd.DataFrame,
) -> Dict[Tuple[str, str], str]:
"""Builds a lookup table of
(municipality, street) => postcode
Non unique matches, i.e. a street spanning more than a single postcode are dropped.
:param df: [description]
:type df: pd.DataFrame
:return: [description]
:rtype: Dict[Tuple[str, str], str]
"""
out = {}
delete_list = []
for t, sn, sd, pc in (
df[["municipality", "street_nominative", "street_dative", "postcode"]]
.drop_duplicates()
.values
):
if (t, sn) in out and out[(t, sn)] != str(pc):
delete_list.append((t, sn))
continue
out[(t, sn)] = str(pc)
out[(t, sd)] = str(pc)
for k in delete_list:
out.pop(k, None)
return out
class Lookup:
"""
Utility class for doing reverse geocoding lookups from the dataframe.
How it works:
- The dataframe has a few categorical columns whose code values are used
for constructing a multidimensional search tree.
- When querying, a best-effort approach is used to translate the
input string into a vector to query the tree.
"""
df: pd.DataFrame
town_street_to_postcode: Dict[Tuple[str, str], str]
streets: List[str]
house_nrs: List[str]
postcodes: List[str]
municipalities: List[str]
street_dative: Dict[str, str]
    def __init__(self) -> None:
self.df = STATIC_DF.copy().sort_index()
self.town_street_to_postcode = _build_municipality_street_to_postcode(self.df)
self.streets = self.df.index.levels[2]
self.house_nrs = self.df.index.levels[3]
self.postcodes = self.df.index.levels[1]
self.municipalities = self.df.index.levels[0]
self.street_dative = dict(
self.df[["street_dative", "street_nominative"]]
.reset_index(drop=True)
.values
)
def text_to_vec( # pylint: disable=too-many-branches
self, s: str
) -> Tuple[str, str, str, str]:
"""Builds a tuple out of an address string.
* index 0, category value of the "municipality" category.
* index 1, category value of the "postcode" category.
* index 2, category value of the "street_nominative" category.
* index 3, category value of the "house_nr" category.
:param s: string containing address
:type s: str
:return: Address tuple
:rtype: Tuple[str, str, str, str]
"""
municipality = ""
postcode = ""
street = ""
house_nr = ""
admin_unit = ""
# Exit early if the string is empty
if not s:
return ("", "", "", "")
for w in s.split(" "):
w = w.strip(",.")
if not street and w in self.streets:
street = w
if not house_nr and (w.upper() in self.house_nrs or "-" in w):
house_nr = w
if not postcode and w in self.postcodes and w != house_nr:
postcode = w
municipality = POSTCODE_MUNICIPALITY_LOOKUP.get(int(postcode), "")
if not postcode and not municipality and w in self.municipalities:
municipality = w
if not municipality and w in ADMINISTRATIVE_DIVISIONS:
admin_unit = w
if admin_unit and street:
for tn in ADMINISTRATIVE_DIVISIONS[admin_unit]:
postcode = self.town_street_to_postcode.get((tn, street), "")
if not postcode:
continue
municipality = tn
break
# if we have municipality and street but no postcode, try looking it up
if municipality and street and not postcode:
postcode = self.town_street_to_postcode.get((municipality, street), "")
# Álftanes has a special case
if not postcode and municipality == "Garðabær":
postcode = self.town_street_to_postcode.get(
("Garðabær (Álftanes)", street)
)
if postcode:
municipality = "Garðabær (Álftanes)"
if house_nr and "-" in house_nr:
house_nr = house_nr.split("-")[0]
return (
municipality or "",
postcode or "",
street or "",
(house_nr or "").upper(),
)
def __query_vector_dataframe(self, q: pd.DataFrame) -> pd.DataFrame:
"""Given a data frame with index:
[municipality, postcode, street_nominative, house_nr]
and columns "qidx" (query index) and "order", matches exact and
partial matches to the address dataframe.
:param q: query dataframe
:type q: pd.DataFrame
:return: query dataframe with additional address columns
:rtype: pd.DataFrame
"""
# get intersecting indexes
found = self.df.index.intersection(q.index)
idx_names = self.df.index.names
# find indexes in the query dataframe which couldn't be found,
# these could be empty queries or partial matches.
missing = q.index.difference(found).unique()
if len(missing):
# create a set of the found values, the purpose is to have a mutable
# data structure to work with.
found_missing = set(found.values)
# get unique set of missing queries
miss_df = q.loc[missing].drop_duplicates()
# keep track of query idx that have not been found
not_found_qidx = set()
missing_data = []
miss_df = miss_df.loc[miss_df.index.map(is_valid_idx)]
# as the address dataframe is fairly large, constrict the search
# space to the records loosely matching what's being queried for. For
# large datasets, this speeds up querying considerably.
search_selector = [
slice(None) if (i[0] == "" and len(i) == 1) else i
for i in [
i.values.tolist()
for i in miss_df.index.remove_unused_levels().levels
]
]
search_space = self.df.loc[tuple(search_selector), :]
# iterate rows of valid missing indexes
for tvec, row in miss_df.iterrows():
qidx = row["qidx"]
# the index is 4 levels, [municipality, postcode, street, house_nr],
# all of these values are allowed to be an empty string, except at
# this point it is clear that a key with an empty string could not
# be found in the index.
# Replace all empty strings with a None slice and query the address dataframe
sq = tuple((i or slice(None) for i in tvec))
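                # e.g. ("", "101", "Laugavegur", "10") becomes
                # (slice(None), "101", "Laugavegur", "10")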
                # NOTE: the author has not found a vectorized approach to querying the
                # source dataframe and matching the query index back with the result.
try:
res = search_space.loc[sq]
except KeyError:
continue
# if an exact match could be found, assign it as the value of the query
# dataframe for the given index. In case there are duplicates, check
# if it's already been found
if len(res) == 1:
# mark the returned tuple for addition to the found indexes
res_val = merge_tuples(sq, res.index)
found_missing.add(res_val)
# create a new row for the missing data
missing_data.append(res_val + tuple(row))
# mark old data query index for deletion
not_found_qidx.add(qidx)
                # NOTE: here there are multiple matches; it is theoretically possible to train
                # a model that would give higher priority to a generic address, determined
                # by its frequency over a corpus.
# delete found qidx from the original query frame
q = q[~q["qidx"].isin(not_found_qidx)]
# concat the missing data found with the query data frame
q = pd.concat(
[
q,
pd.DataFrame(
missing_data, columns=idx_names + q.columns.tolist()
).set_index(idx_names),
]
)
# rebuild the multiindex after mutating it
found = pd.MultiIndex.from_tuples(list(found_missing), names=idx_names)
# select indexable records, right join with the query dataframe
# and sort by the original query order.
out = self.df.loc[found].join(q, how="right").sort_values("order")
# fill NaN string values
out[
[
"municipality",
"postcode",
"special_name",
"house_nr",
"street_dative",
"street_nominative",
]
] = out[
[
"municipality",
"postcode",
"special_name",
"house_nr",
"street_dative",
"street_nominative",
]
].fillna(
value=""
)
out.reset_index(level=[0, 1, 2, 3], drop=True, inplace=True)
return out
def query_dataframe(
self,
q: pd.DataFrame,
) -> pd.DataFrame:
"""Queries a data frame containing structued data,
columns [postcode, house_nr, street/street_nominative] are
required, [municipality] is optional.
:param q: query dataframe
:type q: pd.DataFrame
:return: query dataframe with additional address columns
:rtype: pd.DataFrame
"""
cols = q.columns
q["postcode"] = q["postcode"].astype(str)
if "municipality" not in cols:
q["municipality"] = q["postcode"].apply(
lambda pc: POSTCODE_MUNICIPALITY_LOOKUP.get(
int(pc) if pc.isdigit() else -1, ""
)
)
q["house_nr"] = q["house_nr"].astype(str)
if "street" in cols and "street_nominative" not in cols:
q = q.rename(columns={"street": "street_nominative"})
q["street_nominative"] = q["street_nominative"].apply(
lambda v: self.street_dative.get(v, v)
)
q["qidx"] = pd.Categorical(
q[self.df.index.names].apply(
lambda x: "/".join(x.dropna().astype(str).values), axis=1
)
).codes
q["order"] = list(range(len(q)))
q = q.set_index(keys=self.df.index.names)
return self.__query_vector_dataframe(q)
def query( # pylint: disable=too-many-locals
self, text: Union[str, List[str], np.ndarray]
) -> geopandas.GeoDataFrame:
"""Given text input, returns a dataframe with matching addresses
:param text: string containing a single address or an iterator
containing multiple addresses.
:type text: Union[str, List[str], np.ndarray]
        :return: Data frame containing addresses
:rtype: geopandas.GeoDataFrame
"""
if isinstance(text, str):
text = [text]
# strip whitespace from text
text = [t.strip() for t in text]
# tokenize strings into a list of tuples,
# [municipality, postcode, street_nominative, house_nr]
vecs = [self.text_to_vec(t) for t in text]
# construct dataframe from parsed results
q = pd.DataFrame(vecs, columns=self.df.index.names)
# Set original search query and idx of the query
q["query"] = text
# there might be duplicated values, cast the query as a category
# this is used as the id of the query
q["qidx"] = q["query"].astype("category").cat.codes
# keep the original order of the query
q["order"] = list(range(len(text)))
# set the tokenized vector of
# [municipality, postcode, street_nominative, house_nr] as the index
q = q.set_index(keys=self.df.index.names)
return self.__query_vector_dataframe(q)
def query_text_body(self, text: str) -> pd.DataFrame:
"""Queries a body of text.
This is a special case API for parsing multiple addresses from
a block of text.
:param text: block of text
:type text: str
        :return: Data frame containing addresses
:rtype: pd.DataFrame
"""
arr = []
for m in iter_matches(text):
if not m: # pragma: no cover
break
arr.append(m)
        df = pd.DataFrame(arr)
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
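# Fixture layout: columns 'a', 'b' and 'c' each hold three records (rows 'x', 'y', 'z'),
# column 'd' holds none; group_by maps ('a', 'b') -> 'g1' and ('c', 'd') -> 'g2',
# which is why the grouped record counts below come out as [6, 3].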
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
        # __r*__ is only called if the left operand does not implement the operator
        # (or its implementation returns NotImplemented)
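        # e.g. 10 + a resolves to a.__radd__(10), because int.__add__ returns NotImplemented for a MappedArray operand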
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map_array(arr).wrapper == \
records.map_array(arr, group_by=group_by).wrapper
assert records_grouped.map_array(arr, group_by=False).wrapper.grouper.group_by is None
def test_apply(self):
@njit
def cumsum_apply_nb(records):
return np.cumsum(records['some_field1'])
np.testing.assert_array_equal(
records['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
records.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
records_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert records_grouped.apply(cumsum_apply_nb).wrapper == \
records.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert records_grouped.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_count(self):
assert records['a'].count() == 3
pd.testing.assert_series_equal(
records.count(),
pd.Series(
np.array([3, 3, 3, 0]),
index=wrapper.columns
).rename('count')
)
assert records_grouped['g1'].count() == 6
pd.testing.assert_series_equal(
records_grouped.count(),
pd.Series(
np.array([6, 3]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('count')
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
r = records_nosort
r_grouped = records_nosort_grouped
else:
r = records
r_grouped = records_grouped
record_arrays_close(
r['a'].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
pd.testing.assert_index_equal(
r['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
record_arrays_close(
r[['a', 'a']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(0, 1, 0, 10., 21.), (1, 1, 1, 11., 20.), (2, 1, 2, 12., 19.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
record_arrays_close(
r[['a', 'b']].values,
np.array([
(0, 0, 0, 10., 21.), (1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.),
(3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.), (5, 1, 2, 13., 18.)
], dtype=example_dt)
)
pd.testing.assert_index_equal(
r[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = r.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped['g1'].wrapper.ndim == 2
assert r_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert r_grouped['g2'].wrapper.ndim == 2
assert r_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
r_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert r_grouped[['g1']].wrapper.ndim == 2
assert r_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert r_grouped[['g1', 'g2']].wrapper.ndim == 2
assert r_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
r_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_filtering(self):
filtered_records = vbt.Records(wrapper, records_arr[[0, -1]])
record_arrays_close(
filtered_records.values,
np.array([(0, 0, 0, 10., 21.), (8, 2, 2, 10., 21.)], dtype=example_dt)
)
# a
record_arrays_close(
filtered_records['a'].values,
np.array([(0, 0, 0, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['a'].map_field('some_field1').id_arr,
np.array([0])
)
assert filtered_records['a'].map_field('some_field1').min() == 10.
assert filtered_records['a'].count() == 1.
# b
record_arrays_close(
filtered_records['b'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['b'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['b'].map_field('some_field1').min())
assert filtered_records['b'].count() == 0.
# c
record_arrays_close(
filtered_records['c'].values,
np.array([(8, 0, 2, 10., 21.)], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['c'].map_field('some_field1').id_arr,
np.array([8])
)
assert filtered_records['c'].map_field('some_field1').min() == 10.
assert filtered_records['c'].count() == 1.
# d
record_arrays_close(
filtered_records['d'].values,
np.array([], dtype=example_dt)
)
np.testing.assert_array_equal(
filtered_records['d'].map_field('some_field1').id_arr,
np.array([])
)
assert np.isnan(filtered_records['d'].map_field('some_field1').min())
assert filtered_records['d'].count() == 0.
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count'
], dtype='object')
pd.testing.assert_series_equal(
records.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 2.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
records.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 3
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
records.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'), 6
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c')
)
pd.testing.assert_series_equal(
records['c'].stats(),
records.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
records_grouped['g2'].stats(),
records.stats(column='g2', group_by=group_by)
)
stats_df = records.stats(agg_func=None)
assert stats_df.shape == (4, 4)
pd.testing.assert_index_equal(stats_df.index, records.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# ranges.py ############# #
ts = pd.DataFrame({
'a': [1, -1, 3, -1, 5, -1],
'b': [-1, -1, -1, 4, 5, 6],
'c': [1, 2, 3, -1, -1, -1],
'd': [-1, -1, -1, -1, -1, -1]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
ranges = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days'))
ranges_grouped = vbt.Ranges.from_ts(ts, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestRanges:
def test_mapped_fields(self):
for name in range_dt.names:
np.testing.assert_array_equal(
getattr(ranges, name).values,
ranges.values[name]
)
def test_from_ts(self):
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 2, 3, 1), (2, 0, 4, 5, 1), (3, 1, 3, 5, 0), (4, 2, 0, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper.freq == day_dt
pd.testing.assert_index_equal(
ranges_grouped.wrapper.grouper.group_by,
group_by
)
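# Reading of the expected records above (cross-checked with test_records_readable below):
# each contiguous run of non -1 values in `ts` appears to become one range record laid out as
# (id, col, start_idx, end_idx, status). Column 'b' (-1, -1, -1, 4, 5, 6) yields (3, 1, 3, 5, 0):
# record id 3 in column index 1, starting at row 3 and still running at the last row, hence
# status 0 (rendered as 'Open'); ranges terminated by a -1 carry status 1 ('Closed').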
def test_records_readable(self):
records_readable = ranges.records_readable
np.testing.assert_array_equal(
records_readable['Range Id'].values,
np.array([
0, 1, 2, 3, 4
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-01T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed'
])
)
def test_to_mask(self):
pd.testing.assert_series_equal(
ranges['a'].to_mask(),
ts['a'] != -1
)
pd.testing.assert_frame_equal(
ranges.to_mask(),
ts != -1
)
pd.testing.assert_frame_equal(
ranges_grouped.to_mask(),
pd.DataFrame(
[
[True, True],
[False, True],
[True, True],
[True, False],
[True, False],
[True, False]
],
index=ts.index,
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_duration(self):
np.testing.assert_array_equal(
ranges['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_equal(
ranges.duration.values,
np.array([1, 1, 1, 3, 3])
)
def test_avg_duration(self):
assert ranges['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
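# These averages pool the per-record durations checked in test_duration above: column 'a'
# averages three 1-day ranges (1 day), 'b' and 'c' each hold a single 3-day range, and 'd'
# has no ranges at all, which is why its average is NaT rather than zero. The grouped variant
# below pools g1 = {a, b}: mean of [1, 1, 1, 3] days = 1.5 days (129600000000000 ns).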
pd.testing.assert_series_equal(
ranges_grouped.avg_duration(),
pd.Series(
np.array([129600000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert ranges['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
ranges.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
ranges_grouped.max_duration(),
pd.Series(
np.array([259200000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert ranges['a'].coverage() == 0.5
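# Coverage appears to be the fraction of the index covered by ranges: column 'a' spans 3 of
# the 6 daily rows, hence 0.5; with normalize=False the same quantity is reported in absolute
# steps (3.0), and column 'd' has no ranges, which gives NaN.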
pd.testing.assert_series_equal(
ranges.coverage(),
pd.Series(
np.array([0.5, 0.5, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(),
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage()
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True),
pd.Series(
np.array([1.0, 1.0, 1.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.coverage(normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges.replace(records_arr=np.repeat(ranges.values, 2)).coverage(overlapping=True, normalize=False),
pd.Series(
np.array([3.0, 3.0, 3.0, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
pd.testing.assert_series_equal(
ranges_grouped.coverage(),
ranges_grouped.replace(records_arr=np.repeat(ranges_grouped.values, 2)).coverage()
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage', 'Overlap Coverage',
'Total Records', 'Duration: Min', 'Duration: Median', 'Duration: Max',
'Duration: Mean', 'Duration: Std'
], dtype='object')
pd.testing.assert_series_equal(
ranges.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 1.25, pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 08:00:00'),
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'), 3, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 00:00:00')
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
ranges.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), 4, pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), pd.Timedelta('1 days 00:00:00')
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c')
)
pd.testing.assert_series_equal(
ranges['c'].stats(),
ranges.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
ranges_grouped['g2'].stats(),
ranges.stats(column='g2', group_by=group_by)
)
stats_df = ranges.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, ranges.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# drawdowns.py ############# #
ts2 = pd.DataFrame({
'a': [2, 1, 3, 1, 4, 1],
'b': [1, 2, 1, 3, 1, 4],
'c': [1, 2, 3, 2, 1, 2],
'd': [1, 2, 3, 4, 5, 6]
}, index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6)
])
drawdowns = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days'))
drawdowns_grouped = vbt.Drawdowns.from_ts(ts2, wrapper_kwargs=dict(freq='1 days', group_by=group_by))
class TestDrawdowns:
def test_mapped_fields(self):
for name in drawdown_dt.names:
np.testing.assert_array_equal(
getattr(drawdowns, name).values,
drawdowns.values[name]
)
def test_ts(self):
pd.testing.assert_frame_equal(
drawdowns.ts,
ts2
)
pd.testing.assert_series_equal(
drawdowns['a'].ts,
ts2['a']
)
pd.testing.assert_frame_equal(
drawdowns_grouped['g1'].ts,
ts2[['a', 'b']]
)
assert drawdowns.replace(ts=None)['a'].ts is None
def test_from_ts(self):
record_arrays_close(
drawdowns.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1),
(4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
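# Cross-referencing test_records_readable below, each drawdown tuple appears to lay out as
# (id, col, peak_idx, start_idx, valley_idx, end_idx, peak_val, valley_val, end_val, status),
# with status 1 rendered as 'Recovered' and status 0 as 'Active'.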
assert drawdowns.wrapper.freq == day_dt
pd.testing.assert_index_equal(
drawdowns_grouped.wrapper.grouper.group_by,
group_by
)
def test_records_readable(self):
records_readable = drawdowns.records_readable
np.testing.assert_array_equal(
records_readable['Drawdown Id'].values,
np.array([
0, 1, 2, 3, 4, 5
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'b', 'b', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Peak Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-03T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Start Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-04T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Valley Timestamp'].values,
np.array([
'2020-01-02T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-05T00:00:00.000000000', '2020-01-05T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['End Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-05T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-06T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Peak Value'].values,
np.array([
2., 3., 4., 2., 3., 3.
])
)
np.testing.assert_array_equal(
records_readable['Valley Value'].values,
np.array([
1., 1., 1., 1., 1., 1.
])
)
np.testing.assert_array_equal(
records_readable['End Value'].values,
np.array([
3., 4., 1., 3., 4., 2.
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Recovered', 'Recovered', 'Active', 'Recovered', 'Recovered', 'Active'
])
)
def test_drawdown(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].drawdown.values,
np.array([-0.5, -0.66666667, -0.75])
)
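# These values are consistent with drawdown = (valley_val - peak_val) / peak_val: for column
# 'a', (1 - 2) / 2 = -0.5, (1 - 3) / 3 ~= -0.667 and (1 - 4) / 4 = -0.75.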
np.testing.assert_array_almost_equal(
drawdowns.drawdown.values,
np.array([-0.5, -0.66666667, -0.75, -0.5, -0.66666667, -0.66666667])
)
pd.testing.assert_frame_equal(
drawdowns.drawdown.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[-0.5, np.nan, np.nan, np.nan],
[np.nan, -0.5, np.nan, np.nan],
[-0.66666669, np.nan, np.nan, np.nan],
[-0.75, -0.66666669, -0.66666669, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_drawdown(self):
assert drawdowns['a'].avg_drawdown() == -0.6388888888888888
pd.testing.assert_series_equal(
drawdowns.avg_drawdown(),
pd.Series(
np.array([-0.63888889, -0.58333333, -0.66666667, np.nan]),
index=wrapper.columns
).rename('avg_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_drawdown(),
pd.Series(
np.array([-0.6166666666666666, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_drawdown')
)
def test_max_drawdown(self):
assert drawdowns['a'].max_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.max_drawdown(),
pd.Series(
np.array([-0.75, -0.66666667, -0.66666667, np.nan]),
index=wrapper.columns
).rename('max_drawdown')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_drawdown(),
pd.Series(
np.array([-0.75, -0.6666666666666666]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_drawdown')
)
def test_recovery_return(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_return.values,
np.array([2., 3., 0.])
)
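# Consistent with recovery_return = (end_val - valley_val) / valley_val: for column 'a',
# (3 - 1) / 1 = 2 and (4 - 1) / 1 = 3, while the still-active drawdown sits at its valley,
# giving 0.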
np.testing.assert_array_almost_equal(
drawdowns.recovery_return.values,
np.array([2., 3., 0., 2., 3., 1.])
)
pd.testing.assert_frame_equal(
drawdowns.recovery_return.to_pd(),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[2.0, np.nan, np.nan, np.nan],
[np.nan, 2.0, np.nan, np.nan],
[3.0, np.nan, np.nan, np.nan],
[0.0, 3.0, 1.0, np.nan]
]),
index=ts2.index,
columns=ts2.columns
)
)
def test_avg_recovery_return(self):
assert drawdowns['a'].avg_recovery_return() == 1.6666666666666667
pd.testing.assert_series_equal(
drawdowns.avg_recovery_return(),
pd.Series(
np.array([1.6666666666666667, 2.5, 1.0, np.nan]),
index=wrapper.columns
).rename('avg_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_recovery_return(),
pd.Series(
np.array([2.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_recovery_return')
)
def test_max_recovery_return(self):
assert drawdowns['a'].max_recovery_return() == 3.0
pd.testing.assert_series_equal(
drawdowns.max_recovery_return(),
pd.Series(
np.array([3.0, 3.0, 1.0, np.nan]),
index=wrapper.columns
).rename('max_recovery_return')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_recovery_return(),
pd.Series(
np.array([3.0, 1.0]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_recovery_return')
)
def test_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].duration.values,
np.array([1, 1, 1])
)
np.testing.assert_array_almost_equal(
drawdowns.duration.values,
np.array([1, 1, 1, 1, 1, 3])
)
def test_avg_duration(self):
assert drawdowns['a'].avg_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.avg_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('avg_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.avg_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('avg_duration')
)
def test_max_duration(self):
assert drawdowns['a'].max_duration() == pd.Timedelta('1 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.max_duration(),
pd.Series(
np.array([86400000000000, 86400000000000, 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('max_duration')
)
pd.testing.assert_series_equal(
drawdowns_grouped.max_duration(),
pd.Series(
np.array([86400000000000, 259200000000000], dtype='timedelta64[ns]'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('max_duration')
)
def test_coverage(self):
assert drawdowns['a'].coverage() == 0.5
pd.testing.assert_series_equal(
drawdowns.coverage(),
pd.Series(
np.array([0.5, 0.3333333333333333, 0.5, np.nan]),
index=ts2.columns
).rename('coverage')
)
pd.testing.assert_series_equal(
drawdowns_grouped.coverage(),
pd.Series(
np.array([0.4166666666666667, 0.25]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('coverage')
)
def test_decline_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].decline_duration.values,
np.array([1., 1., 1.])
)
np.testing.assert_array_almost_equal(
drawdowns.decline_duration.values,
np.array([1., 1., 1., 1., 1., 2.])
)
def test_recovery_duration(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration.values,
np.array([1, 1, 0])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration.values,
np.array([1, 1, 0, 1, 1, 1])
)
def test_recovery_duration_ratio(self):
np.testing.assert_array_almost_equal(
drawdowns['a'].recovery_duration_ratio.values,
np.array([1., 1., 0.])
)
np.testing.assert_array_almost_equal(
drawdowns.recovery_duration_ratio.values,
np.array([1., 1., 0., 1., 1., 0.5])
)
def test_active_records(self):
assert isinstance(drawdowns.active, vbt.Drawdowns)
assert drawdowns.active.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4., 1., 1., 0)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].active.values,
drawdowns.active['a'].values
)
record_arrays_close(
drawdowns.active.values,
np.array([
(2, 0, 4, 5, 5, 5, 4.0, 1.0, 1.0, 0), (5, 2, 2, 3, 4, 5, 3.0, 1.0, 2.0, 0)
], dtype=drawdown_dt)
)
def test_recovered_records(self):
assert isinstance(drawdowns.recovered, vbt.Drawdowns)
assert drawdowns.recovered.wrapper == drawdowns.wrapper
record_arrays_close(
drawdowns['a'].recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
record_arrays_close(
drawdowns['a'].recovered.values,
drawdowns.recovered['a'].values
)
record_arrays_close(
drawdowns.recovered.values,
np.array([
(0, 0, 0, 1, 1, 2, 2.0, 1.0, 3.0, 1), (1, 0, 2, 3, 3, 4, 3.0, 1.0, 4.0, 1),
(3, 1, 1, 2, 2, 3, 2.0, 1.0, 3.0, 1), (4, 1, 3, 4, 4, 5, 3.0, 1.0, 4.0, 1)
], dtype=drawdown_dt)
)
def test_active_drawdown(self):
assert drawdowns['a'].active_drawdown() == -0.75
pd.testing.assert_series_equal(
drawdowns.active_drawdown(),
pd.Series(
np.array([-0.75, np.nan, -0.3333333333333333, np.nan]),
index=wrapper.columns
).rename('active_drawdown')
)
with pytest.raises(Exception):
drawdowns_grouped.active_drawdown()
def test_active_duration(self):
assert drawdowns['a'].active_duration() == np.timedelta64(86400000000000)
pd.testing.assert_series_equal(
drawdowns.active_duration(),
pd.Series(
np.array([86400000000000, 'NaT', 259200000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_duration()
def test_active_recovery(self):
assert drawdowns['a'].active_recovery() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery(),
pd.Series(
np.array([0., np.nan, 0.5, np.nan]),
index=wrapper.columns
).rename('active_recovery')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery()
def test_active_recovery_return(self):
assert drawdowns['a'].active_recovery_return() == 0.
pd.testing.assert_series_equal(
drawdowns.active_recovery_return(),
pd.Series(
np.array([0., np.nan, 1., np.nan]),
index=wrapper.columns
).rename('active_recovery_return')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_return()
def test_active_recovery_duration(self):
assert drawdowns['a'].active_recovery_duration() == pd.Timedelta('0 days 00:00:00')
pd.testing.assert_series_equal(
drawdowns.active_recovery_duration(),
pd.Series(
np.array([0, 'NaT', 86400000000000, 'NaT'], dtype='timedelta64[ns]'),
index=wrapper.columns
).rename('active_recovery_duration')
)
with pytest.raises(Exception):
drawdowns_grouped.active_recovery_duration()
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Active Drawdown [%]', 'Active Duration', 'Active Recovery [%]',
'Active Recovery Return [%]', 'Active Recovery Duration',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object')
pd.testing.assert_series_equal(
drawdowns.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(settings=dict(incl_active=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 44.444444444444436, 1.5, 1.0, 0.5,
54.166666666666664, pd.Timedelta('2 days 00:00:00'), 25.0, 50.0,
pd.Timedelta('0 days 12:00:00'), 69.44444444444444, 62.962962962962955,
pd.Timedelta('1 days 16:00:00'), pd.Timedelta('1 days 16:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 50.0, 3, 2, 1, 75.0, pd.Timedelta('1 days 00:00:00'),
0.0, 0.0, pd.Timedelta('0 days 00:00:00'), 66.66666666666666, 58.33333333333333,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 300.0, 250.0,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
drawdowns.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-06 00:00:00'),
pd.Timedelta('6 days 00:00:00'), 41.66666666666667, 5, 4, 1, 66.66666666666666,
58.33333333333333, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
300.0, 250.0, pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'), 1.0
],
index=pd.Index([
'Start', 'End', 'Period', 'Coverage [%]', 'Total Records',
'Total Recovered Drawdowns', 'Total Active Drawdowns',
'Max Drawdown [%]', 'Avg Drawdown [%]', 'Max Drawdown Duration',
'Avg Drawdown Duration', 'Max Recovery Return [%]',
'Avg Recovery Return [%]', 'Max Recovery Duration',
'Avg Recovery Duration', 'Avg Recovery Duration Ratio'
], dtype='object'),
name='g1'
)
)
pd.testing.assert_series_equal(
drawdowns['c'].stats(),
drawdowns.stats(column='c')
)
pd.testing.assert_series_equal(
drawdowns['c'].stats(),
drawdowns.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
drawdowns_grouped['g2'].stats(),
drawdowns_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
drawdowns_grouped['g2'].stats(),
drawdowns.stats(column='g2', group_by=group_by)
)
stats_df = drawdowns.stats(agg_func=None)
assert stats_df.shape == (4, 21)
pd.testing.assert_index_equal(stats_df.index, drawdowns.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# orders.py ############# #
close = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5),
datetime(2020, 1, 6),
datetime(2020, 1, 7),
datetime(2020, 1, 8)
]).vbt.tile(4, keys=['a', 'b', 'c', 'd'])
size = np.full(close.shape, np.nan, dtype=np.float_)
size[:, 0] = [1, 0.1, -1, -0.1, np.nan, 1, -1, 2]
size[:, 1] = [-1, -0.1, 1, 0.1, np.nan, -1, 1, -2]
size[:, 2] = [1, 0.1, -1, -0.1, np.nan, 1, -2, 2]
orders = vbt.Portfolio.from_orders(close, size, fees=0.01, freq='1 days').orders
orders_grouped = orders.regroup(group_by)
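# With fees=0.01 every order below appears to pay size * price * 1% in fees (e.g. 0.1 units at
# a price of 2.0 -> 0.002), a NaN size simply skips that bar, and column 'd' (all-NaN sizes)
# produces no orders at all, which is why only 21 records (7 per active column) appear.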
class TestOrders:
def test_mapped_fields(self):
for name in order_dt.names:
np.testing.assert_array_equal(
getattr(orders, name).values,
orders.values[name]
)
def test_close(self):
pd.testing.assert_frame_equal(
orders.close,
close
)
pd.testing.assert_series_equal(
orders['a'].close,
close['a']
)
pd.testing.assert_frame_equal(
orders_grouped['g1'].close,
close[['a', 'b']]
)
assert orders.replace(close=None)['a'].close is None
def test_records_readable(self):
records_readable = orders.records_readable
np.testing.assert_array_equal(
records_readable['Order Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20
])
)
np.testing.assert_array_equal(
records_readable['Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-02T00:00:00.000000000', '2020-01-03T00:00:00.000000000',
'2020-01-04T00:00:00.000000000', '2020-01-06T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-02T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b', 'b',
'b', 'c', 'c', 'c', 'c', 'c', 'c', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Size'].values,
np.array([
1.0, 0.1, 1.0, 0.1, 1.0, 1.0, 2.0, 1.0, 0.1, 1.0, 0.1, 1.0, 1.0,
2.0, 1.0, 0.1, 1.0, 0.1, 1.0, 2.0, 2.0
])
)
np.testing.assert_array_equal(
records_readable['Price'].values,
np.array([
1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0,
8.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Fees'].values,
np.array([
0.01, 0.002, 0.03, 0.004, 0.06, 0.07, 0.16, 0.01, 0.002, 0.03,
0.004, 0.06, 0.07, 0.16, 0.01, 0.002, 0.03, 0.004, 0.06, 0.14,
0.16
])
)
np.testing.assert_array_equal(
records_readable['Side'].values,
np.array([
'Buy', 'Buy', 'Sell', 'Sell', 'Buy', 'Sell', 'Buy', 'Sell', 'Sell',
'Buy', 'Buy', 'Sell', 'Buy', 'Sell', 'Buy', 'Buy', 'Sell', 'Sell',
'Buy', 'Sell', 'Buy'
])
)
def test_buy_records(self):
assert isinstance(orders.buy, vbt.Orders)
assert orders.buy.wrapper == orders.wrapper
record_arrays_close(
orders['a'].buy.values,
np.array([
(0, 0, 0, 1., 1., 0.01, 0), (1, 0, 1, 0.1, 2., 0.002, 0),
(4, 0, 5, 1., 6., 0.06, 0), (6, 0, 7, 2., 8., 0.16, 0)
], dtype=order_dt)
)
record_arrays_close(
orders['a'].buy.values,
orders.buy['a'].values
)
record_arrays_close(
orders.buy.values,
np.array([
(0, 0, 0, 1., 1., 0.01, 0), (1, 0, 1, 0.1, 2., 0.002, 0),
(4, 0, 5, 1., 6., 0.06, 0), (6, 0, 7, 2., 8., 0.16, 0),
(9, 1, 2, 1., 3., 0.03, 0), (10, 1, 3, 0.1, 4., 0.004, 0),
(12, 1, 6, 1., 7., 0.07, 0), (14, 2, 0, 1., 1., 0.01, 0),
(15, 2, 1, 0.1, 2., 0.002, 0), (18, 2, 5, 1., 6., 0.06, 0),
(20, 2, 7, 2., 8., 0.16, 0)
], dtype=order_dt)
)
def test_sell_records(self):
assert isinstance(orders.sell, vbt.Orders)
assert orders.sell.wrapper == orders.wrapper
record_arrays_close(
orders['a'].sell.values,
np.array([
(2, 0, 2, 1., 3., 0.03, 1), (3, 0, 3, 0.1, 4., 0.004, 1),
(5, 0, 6, 1., 7., 0.07, 1)
], dtype=order_dt)
)
record_arrays_close(
orders['a'].sell.values,
orders.sell['a'].values
)
record_arrays_close(
orders.sell.values,
np.array([
(2, 0, 2, 1., 3., 0.03, 1), (3, 0, 3, 0.1, 4., 0.004, 1),
(5, 0, 6, 1., 7., 0.07, 1), (7, 1, 0, 1., 1., 0.01, 1),
(8, 1, 1, 0.1, 2., 0.002, 1), (11, 1, 5, 1., 6., 0.06, 1),
(13, 1, 7, 2., 8., 0.16, 1), (16, 2, 2, 1., 3., 0.03, 1),
(17, 2, 3, 0.1, 4., 0.004, 1), (19, 2, 6, 2., 7., 0.14, 1)
], dtype=order_dt)
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Total Records', 'Total Buy Orders', 'Total Sell Orders',
'Min Size', 'Max Size', 'Avg Size', 'Avg Buy Size', 'Avg Sell Size',
'Avg Buy Price', 'Avg Sell Price', 'Total Fees', 'Min Fees', 'Max Fees',
'Avg Fees', 'Avg Buy Fees', 'Avg Sell Fees'
], dtype='object')
pd.testing.assert_series_equal(
orders.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 5.25, 2.75, 2.5, 0.10000000000000002, 2.0,
0.9333333333333335, 0.9166666666666666, 0.9194444444444446, 4.388888888888889,
4.527777777777779, 0.26949999999999996, 0.002, 0.16, 0.051333333333333335,
0.050222222222222224, 0.050222222222222224
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
orders.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 7, 4, 3, 0.1, 2.0, 0.8857142857142858,
1.025, 0.7000000000000001, 4.25, 4.666666666666667, 0.33599999999999997,
0.002, 0.16, 0.047999999999999994, 0.057999999999999996, 0.03466666666666667
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
orders.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), 14, 7, 7, 0.1, 2.0, 0.8857142857142858,
0.8857142857142856, 0.8857142857142858, 4.428571428571429, 4.428571428571429,
0.672, 0.002, 0.16, 0.048, 0.048, 0.047999999999999994
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
orders['c'].stats(),
orders.stats(column='c')
)
pd.testing.assert_series_equal(
orders['c'].stats(),
orders.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
orders_grouped['g2'].stats(),
orders_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
orders_grouped['g2'].stats(),
orders.stats(column='g2', group_by=group_by)
)
stats_df = orders.stats(agg_func=None)
assert stats_df.shape == (4, 19)
pd.testing.assert_index_equal(stats_df.index, orders.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# trades.py ############# #
exit_trades = vbt.ExitTrades.from_orders(orders)
exit_trades_grouped = vbt.ExitTrades.from_orders(orders_grouped)
class TestExitTrades:
def test_mapped_fields(self):
for name in trade_dt.names:
if name == 'return':
np.testing.assert_array_equal(
getattr(exit_trades, 'returns').values,
exit_trades.values[name]
)
else:
np.testing.assert_array_equal(
getattr(exit_trades, name).values,
exit_trades.values[name]
)
def test_close(self):
pd.testing.assert_frame_equal(
exit_trades.close,
close
)
pd.testing.assert_series_equal(
exit_trades['a'].close,
close['a']
)
pd.testing.assert_frame_equal(
exit_trades_grouped['g1'].close,
close[['a', 'b']]
)
assert exit_trades.replace(close=None)['a'].close is None
def test_records_arr(self):
record_arrays_close(
exit_trades.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
reversed_col_orders = orders.replace(records_arr=np.concatenate((
orders.values[orders.values['col'] == 2],
orders.values[orders.values['col'] == 1],
orders.values[orders.values['col'] == 0]
)))
record_arrays_close(
vbt.ExitTrades.from_orders(reversed_col_orders).values,
exit_trades.values
)
def test_records_readable(self):
records_readable = exit_trades.records_readable
np.testing.assert_array_equal(
records_readable['Exit Trade Id'].values,
np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
])
)
np.testing.assert_array_equal(
records_readable['Column'].values,
np.array([
'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'c', 'c', 'c', 'c', 'c'
])
)
np.testing.assert_array_equal(
records_readable['Size'].values,
np.array([
1.0, 0.10000000000000009, 1.0, 2.0, 1.0, 0.10000000000000009, 1.0,
2.0, 1.0, 0.10000000000000009, 1.0, 1.0, 1.0
])
)
np.testing.assert_array_equal(
records_readable['Entry Timestamp'].values,
np.array([
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-01T00:00:00.000000000', '2020-01-01T00:00:00.000000000',
'2020-01-06T00:00:00.000000000', '2020-01-07T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Avg Entry Price'].values,
np.array([
1.0909090909090908, 1.0909090909090908, 6.0, 8.0,
1.0909090909090908, 1.0909090909090908, 6.0, 8.0,
1.0909090909090908, 1.0909090909090908, 6.0, 7.0, 8.0
])
)
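# The repeated 1.0909... is the size-weighted entry price of the first position in each column,
# built from 1.0 unit at price 1.0 plus 0.1 unit at price 2.0: (1*1 + 0.1*2) / 1.1 = 1.0909...;
# both partial exits of that position report the same average entry price.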
np.testing.assert_array_equal(
records_readable['Entry Fees'].values,
np.array([
0.010909090909090908, 0.0010909090909090918, 0.06, 0.16,
0.010909090909090908, 0.0010909090909090918, 0.06, 0.16,
0.010909090909090908, 0.0010909090909090918, 0.06, 0.07, 0.08
])
)
np.testing.assert_array_equal(
records_readable['Exit Timestamp'].values,
np.array([
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-03T00:00:00.000000000', '2020-01-04T00:00:00.000000000',
'2020-01-07T00:00:00.000000000', '2020-01-08T00:00:00.000000000',
'2020-01-08T00:00:00.000000000'
], dtype='datetime64[ns]')
)
np.testing.assert_array_equal(
records_readable['Avg Exit Price'].values,
np.array([
3.0, 4.0, 7.0, 8.0, 3.0, 4.0, 7.0, 8.0, 3.0, 4.0, 7.0, 8.0, 8.0
])
)
np.testing.assert_array_equal(
records_readable['Exit Fees'].values,
np.array([
0.03, 0.004, 0.07, 0.0, 0.03, 0.004, 0.07, 0.0, 0.03, 0.004, 0.07, 0.08, 0.0
])
)
np.testing.assert_array_equal(
records_readable['PnL'].values,
np.array([
1.8681818181818182, 0.2858181818181821, 0.8699999999999999, -0.16,
-1.9500000000000002, -0.29600000000000026, -1.1300000000000001,
-0.16, 1.8681818181818182, 0.2858181818181821, 0.8699999999999999,
-1.1500000000000001, -0.08
])
)
np.testing.assert_array_equal(
records_readable['Return'].values,
np.array([
1.7125000000000001, 2.62, 0.145, -0.01, -1.7875000000000003,
-2.7133333333333334, -0.18833333333333335, -0.01,
1.7125000000000001, 2.62, 0.145, -0.1642857142857143, -0.01
])
)
np.testing.assert_array_equal(
records_readable['Direction'].values,
np.array([
'Long', 'Long', 'Long', 'Long', 'Short', 'Short', 'Short',
'Short', 'Long', 'Long', 'Long', 'Short', 'Long'
])
)
np.testing.assert_array_equal(
records_readable['Status'].values,
np.array([
'Closed', 'Closed', 'Closed', 'Open', 'Closed', 'Closed', 'Closed',
'Open', 'Closed', 'Closed', 'Closed', 'Closed', 'Open'
])
)
np.testing.assert_array_equal(
records_readable['Position Id'].values,
np.array([
0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9
])
)
def test_duration(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].duration.values,
np.array([2, 3, 1, 1])
)
np.testing.assert_array_almost_equal(
exit_trades.duration.values,
np.array([2, 3, 1, 1, 2, 3, 1, 1, 2, 3, 1, 1, 1])
)
def test_winning_records(self):
assert isinstance(exit_trades.winning, vbt.ExitTrades)
assert exit_trades.winning.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].winning.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].winning.values,
exit_trades.winning['a'].values
)
record_arrays_close(
exit_trades.winning.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7)
], dtype=trade_dt)
)
def test_losing_records(self):
assert isinstance(exit_trades.losing, vbt.ExitTrades)
assert exit_trades.losing.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].losing.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].losing.values,
exit_trades.losing['a'].values
)
record_arrays_close(
exit_trades.losing.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_win_rate(self):
assert exit_trades['a'].win_rate() == 0.75
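# win_rate() here appears to count every trade record, open ones included: column 'a' has
# 3 profitable trades out of 4 records -> 0.75 (the stats() test below reports 100% for 'a'
# because the open losing trade is excluded there by default).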
pd.testing.assert_series_equal(
exit_trades.win_rate(),
pd.Series(
np.array([0.75, 0., 0.6, np.nan]),
index=close.columns
).rename('win_rate')
)
pd.testing.assert_series_equal(
exit_trades_grouped.win_rate(),
pd.Series(
np.array([0.375, 0.6]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('win_rate')
)
def test_winning_streak(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].winning_streak.values,
np.array([1, 2, 3, 0])
)
np.testing.assert_array_almost_equal(
exit_trades.winning_streak.values,
np.array([1, 2, 3, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0])
)
def test_losing_streak(self):
np.testing.assert_array_almost_equal(
exit_trades['a'].losing_streak.values,
np.array([0, 0, 0, 1])
)
np.testing.assert_array_almost_equal(
exit_trades.losing_streak.values,
np.array([0, 0, 0, 1, 1, 2, 3, 4, 0, 0, 0, 1, 2])
)
def test_profit_factor(self):
assert exit_trades['a'].profit_factor() == 18.9
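# Consistent with profit factor = gross profit / gross loss over these records:
# (1.86818182 + 0.28581818 + 0.87) / 0.16 = 3.024 / 0.16 = 18.9 for column 'a'.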
pd.testing.assert_series_equal(
exit_trades.profit_factor(),
pd.Series(
np.array([18.9, 0., 2.45853659, np.nan]),
index=ts2.columns
).rename('profit_factor')
)
pd.testing.assert_series_equal(
exit_trades_grouped.profit_factor(),
pd.Series(
np.array([0.81818182, 2.45853659]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('profit_factor')
)
def test_expectancy(self):
assert exit_trades['a'].expectancy() == 0.716
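# Expectancy here matches the mean PnL per trade record:
# (1.86818182 + 0.28581818 + 0.87 - 0.16) / 4 = 2.864 / 4 = 0.716 for column 'a'.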
pd.testing.assert_series_equal(
exit_trades.expectancy(),
pd.Series(
np.array([0.716, -0.884, 0.3588, np.nan]),
index=ts2.columns
).rename('expectancy')
)
pd.testing.assert_series_equal(
exit_trades_grouped.expectancy(),
pd.Series(
np.array([-0.084, 0.3588]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('expectancy')
)
def test_sqn(self):
assert exit_trades['a'].sqn() == 1.634155521947584
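# The expected value is consistent with SQN = sqrt(n) * mean(pnl) / std(pnl) using the sample
# standard deviation (ddof=1). A quick check with a hypothetical helper (not a vectorbt API):
# def sqn(pnl):
#     pnl = np.asarray(pnl, dtype=float)
#     return np.sqrt(pnl.size) * pnl.mean() / pnl.std(ddof=1)
# sqn([1.86818182, 0.28581818, 0.87, -0.16])  # ~1.634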
pd.testing.assert_series_equal(
exit_trades.sqn(),
pd.Series(
np.array([1.63415552, -2.13007307, 0.71660403, np.nan]),
index=ts2.columns
).rename('sqn')
)
pd.testing.assert_series_equal(
exit_trades_grouped.sqn(),
pd.Series(
np.array([-0.20404671, 0.71660403]),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('sqn')
)
def test_long_records(self):
assert isinstance(exit_trades.long, vbt.ExitTrades)
assert exit_trades.long.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].long.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].long.values,
exit_trades.long['a'].values
)
record_arrays_close(
exit_trades.long.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_short_records(self):
assert isinstance(exit_trades.short, vbt.ExitTrades)
assert exit_trades.short.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].short.values,
np.array([], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].short.values,
exit_trades.short['a'].values
)
record_arrays_close(
exit_trades.short.values,
np.array([
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8)
], dtype=trade_dt)
)
def test_open_records(self):
assert isinstance(exit_trades.open, vbt.ExitTrades)
assert exit_trades.open.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].open.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].open.values,
exit_trades.open['a'].values
)
record_arrays_close(
exit_trades.open.values,
np.array([
(3, 0, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 0, 0, 2),
(7, 1, 2., 7, 8., 0.16, 7, 8., 0., -0.16, -0.01, 1, 0, 5),
(12, 2, 1., 7, 8., 0.08, 7, 8., 0., -0.08, -0.01, 0, 0, 9)
], dtype=trade_dt)
)
def test_closed_records(self):
assert isinstance(exit_trades.closed, vbt.ExitTrades)
assert exit_trades.closed.wrapper == exit_trades.wrapper
record_arrays_close(
exit_trades['a'].closed.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1)
], dtype=trade_dt)
)
record_arrays_close(
exit_trades['a'].closed.values,
exit_trades.closed['a'].values
)
record_arrays_close(
exit_trades.closed.values,
np.array([
(0, 0, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 0),
(1, 0, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 0),
(2, 0, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 1),
(4, 1, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, -1.95, -1.7875, 1, 1, 3),
(5, 1, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, -0.296, -2.71333333, 1, 1, 3),
(6, 1, 1., 5, 6., 0.06, 6, 7., 0.07, -1.13, -0.18833333, 1, 1, 4),
(8, 2, 1., 0, 1.09090909, 0.01090909, 2, 3., 0.03, 1.86818182, 1.7125, 0, 1, 6),
(9, 2, 0.1, 0, 1.09090909, 0.00109091, 3, 4., 0.004, 0.28581818, 2.62, 0, 1, 6),
(10, 2, 1., 5, 6., 0.06, 6, 7., 0.07, 0.87, 0.145, 0, 1, 7),
(11, 2, 1., 6, 7., 0.07, 7, 8., 0.08, -1.15, -0.16428571, 1, 1, 8)
], dtype=trade_dt)
)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'First Trade Start', 'Last Trade End',
'Coverage', 'Overlap Coverage', 'Total Records', 'Total Long Trades',
'Total Short Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Max Win Streak', 'Max Loss Streak',
'Best Trade [%]', 'Worst Trade [%]', 'Avg Winning Trade [%]',
'Avg Losing Trade [%]', 'Avg Winning Trade Duration',
'Avg Losing Trade Duration', 'Profit Factor', 'Expectancy', 'SQN'
], dtype='object')
pd.testing.assert_series_equal(
exit_trades.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 08:00:00'),
pd.Timedelta('2 days 00:00:00'), 3.25, 2.0, 1.25, 2.5, 0.75, -0.1,
58.333333333333336, 2.0, 1.3333333333333333, 168.38888888888889,
-91.08730158730158, 149.25, -86.3670634920635, pd.Timedelta('2 days 00:00:00'),
pd.Timedelta('1 days 12:00:00'), np.inf, 0.11705555555555548, 0.18931590012681135
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
exit_trades.stats(settings=dict(incl_open=True)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 08:00:00'),
pd.Timedelta('2 days 00:00:00'), 3.25, 2.0, 1.25, 2.5, 0.75, -0.1,
58.333333333333336, 2.0, 2.3333333333333335, 174.33333333333334,
-96.25396825396825, 149.25, -42.39781746031746, pd.Timedelta('2 days 00:00:00'),
pd.Timedelta('1 days 06:00:00'), 7.11951219512195, 0.06359999999999993, 0.07356215977397455
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
exit_trades.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('2 days 00:00:00'), 4, 4, 0, 3, 1, -0.16, 100.0, 3, 0,
262.0, 14.499999999999998, 149.25, np.nan, pd.Timedelta('2 days 00:00:00'),
pd.NaT, np.inf, 1.008, 2.181955050824476
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
exit_trades.stats(column='g1', group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-08 00:00:00'),
pd.Timedelta('8 days 00:00:00'), pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-08 00:00:00'), pd.Timedelta('5 days 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 8, 4, 4, 6, 2, -0.32, 50.0, 3, 3, 262.0,
-271.3333333333333, 149.25, -156.30555555555557, pd.Timedelta('2 days 00:00:00'),
|
pd.Timedelta('2 days 00:00:00')
|
pandas.Timedelta
|
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.sparse import (
SparseArray,
SparseDtype,
)
arr_data = np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6])
arr = SparseArray(arr_data)
class TestGetitem:
def test_getitem(self):
dense = arr.to_dense()
for i in range(len(arr)):
tm.assert_almost_equal(arr[i], dense[i])
tm.assert_almost_equal(arr[-i], dense[-i])
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize(
"slc",
[
np.s_[:],
np.s_[1:10],
np.s_[1:100],
np.s_[10:1],
np.s_[:-3],
np.s_[-5:-4],
np.s_[:-12],
np.s_[-12:],
np.s_[2:],
np.s_[2::3],
np.s_[::2],
np.s_[::-1],
np.s_[::-2],
np.s_[1:6:2],
np.s_[:-6:-2],
],
)
@pytest.mark.parametrize(
"as_dense", [[np.nan] * 10, [1] * 10, [np.nan] * 5 + [1] * 5, []]
)
def test_getslice(self, slc, as_dense):
as_dense = np.array(as_dense)
arr = SparseArray(as_dense)
result = arr[slc]
expected = SparseArray(as_dense[slc])
tm.assert_sp_array_equal(result, expected)
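# The parametrization above covers empty, out-of-range, negative-step and reversed slices;
# in every case slicing the SparseArray is expected to be equivalent to slicing the dense
# ndarray first and re-wrapping the result in SparseArray.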
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[(slice(4, None),)]
exp = SparseArray(dense[4:])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[(slice(4, None),)]
exp = SparseArray(dense[4:], fill_value=0)
|
tm.assert_sp_array_equal(res, exp)
|
pandas._testing.assert_sp_array_equal
|
import glob
import os
import numpy as np
import pandas as pd
def get_accuracy_paths(loss_dir, dataset, init, layer):
prefix = os.path.join(loss_dir, dataset + "_" + init, "layers_" + str(layer))
suffix = 'acc_test.npy'
return glob.glob(os.path.join(prefix + '*', suffix))
def build_frame(aggr_func, accuracies, datasets, inits, layers):
frame =
|
pd.DataFrame(index=layers)
|
pandas.DataFrame
|
"""Loading example datasets."""
from os.path import dirname, join
import datetime
import io
import requests
import numpy as np
import pandas as pd
import time
def load_daily(long: bool = True):
"""2020 Covid, Air Pollution, and Economic Data.
Sources: Covid Tracking Project, EPA, and FRED
Args:
long (bool): if True, return data in long format. Otherwise return wide
"""
module_path = dirname(__file__)
data_file_name = join(module_path, 'data', 'covid_daily.zip')
df_wide = pd.read_csv(data_file_name, index_col=0, parse_dates=True)
if not long:
return df_wide
else:
df_long = df_wide.reset_index(drop=False).melt(
id_vars=['datetime'], var_name='series_id', value_name='value'
)
return df_long
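# Shape of the two return formats above (illustrative note only, kept as a comment so nothing
# runs at import time):
# load_daily(long=False) -> wide DataFrame: DatetimeIndex rows, one column per series
# load_daily(long=True)  -> long DataFrame with columns ['datetime', 'series_id', 'value']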
def load_fred_monthly():
"""
Federal Reserve of St. Louis.
from autots.datasets.fred import get_fred_data
SeriesNameDict = {'GS10':'10-Year Treasury Constant Maturity Rate',
'MCOILWTICO':'Crude Oil West Texas Intermediate Cushing Oklahoma',
'CSUSHPISA': ' U.S. National Home Price Index',
'EXUSEU': 'US Euro Foreign Exchange Rate',
'EXCHUS': 'China US Foreign Exchange Rate',
'EXCAUS' : 'Canadian to US Dollar Exchange Rate Daily',
'EMVOVERALLEMV': 'Equity Market Volatility Tracker Overall', # this is a more irregular series
'T10YIEM' : '10 Year Breakeven Inflation Rate',
'USEPUINDXM': 'Economic Policy Uncertainty Index for United States' # also very irregular
}
monthly_data = get_fred_data(fredkey = 'XXXXXXXXX', SeriesNameDict = SeriesNameDict)
"""
module_path = dirname(__file__)
data_file_name = join(module_path, 'data', 'fred_monthly.zip')
df_long = pd.read_csv(data_file_name, compression='zip')
df_long['datetime'] = pd.to_datetime(
df_long['datetime'], infer_datetime_format=True
)
return df_long
def load_monthly(long: bool = True):
"""Federal Reserve of St. Louis monthly economic indicators."""
if long:
return load_fred_monthly()
else:
from autots.tools.shaping import long_to_wide
df_long = load_fred_monthly()
df_wide = long_to_wide(
df_long,
date_col='datetime',
value_col='value',
id_col='series_id',
aggfunc='first',
)
return df_wide
def load_fred_yearly():
"""
Federal Reserve of St. Louis.
from autots.datasets.fred import get_fred_data
SeriesNameDict = {'GDPA':"Gross Domestic Product",
'ACOILWTICO':'Crude Oil West Texas Intermediate Cushing Oklahoma',
'AEXUSEU': 'US Euro Foreign Exchange Rate',
'AEXCHUS': 'China US Foreign Exchange Rate',
'AEXCAUS' : 'Canadian to US Dollar Exchange Rate Daily',
'MEHOINUSA672N': 'Real Median US Household Income',
'CPALTT01USA661S': 'Consumer Price Index All Items',
'FYFSD': 'Federal Surplus or Deficit',
'DDDM01USA156NWDB': 'Stock Market Capitalization to US GDP',
'LEU0252881600A': 'Median Weekly Earnings for Salary Workers',
'LFWA64TTUSA647N': 'US Working Age Population',
'IRLTLT01USA156N' : 'Long Term Government Bond Yields'
}
monthly_data = get_fred_data(fredkey = 'XXXXXXXXX', SeriesNameDict = SeriesNameDict)
"""
module_path = dirname(__file__)
data_file_name = join(module_path, 'data', 'fred_yearly.zip')
df_long = pd.read_csv(data_file_name)
df_long['datetime'] = pd.to_datetime(
df_long['datetime'], infer_datetime_format=True
)
return df_long
def load_yearly(long: bool = True):
"""Federal Reserve of St. Louis annual economic indicators."""
if long:
return load_fred_yearly()
else:
from autots.tools.shaping import long_to_wide
df_long = load_fred_yearly()
df_wide = long_to_wide(
df_long,
date_col='datetime',
value_col='value',
id_col='series_id',
aggfunc='first',
)
return df_wide
def load_traffic_hourly(long: bool = True):
"""
From the MN DOT via the UCI data repository.
Yes, Minnesota is the best state of the Union.
"""
module_path = dirname(__file__)
data_file_name = join(module_path, 'data', 'traffic_hourly.zip')
df_wide = pd.read_csv(
data_file_name, index_col=0, parse_dates=True, compression='zip'
)
if not long:
return df_wide
else:
df_long = df_wide.reset_index(drop=False).melt(
id_vars=['datetime'], var_name='series_id', value_name='value'
)
return df_long
def load_hourly(long: bool = True):
"""Traffic data from the MN DOT via the UCI data repository."""
return load_traffic_hourly(long=long)
def load_eia_weekly():
"""Weekly petroleum industry data from the EIA."""
module_path = dirname(__file__)
data_file_name = join(module_path, 'data', 'eia_weekly.zip')
df_long = pd.read_csv(data_file_name, compression='zip')
df_long['datetime'] = pd.to_datetime(
df_long['datetime'], infer_datetime_format=True
)
return df_long
def load_weekly(long: bool = True):
"""Weekly petroleum industry data from the EIA."""
if long:
return load_eia_weekly()
else:
from autots.tools.shaping import long_to_wide
df_long = load_eia_weekly()
df_wide = long_to_wide(
df_long,
date_col='datetime',
value_col='value',
id_col='series_id',
aggfunc='first',
)
return df_wide
def load_weekdays(long: bool = False, categorical: bool = True, periods: int = 180):
"""Test edge cases by creating a Series with values as day of week.
Args:
long (bool):
if True, return a df with columns "value" and "datetime"
if False, return a Series with dt index
categorical (bool): if True, return str/object, else return int
periods (int): number of periods, ie length of data to generate
"""
idx = pd.date_range(end=pd.Timestamp.today(), periods=periods, freq="D")
df_wide = pd.Series(idx.weekday, index=idx, name="value")
df_wide.index.name = "datetime"
if categorical:
df_wide = df_wide.replace(
{
0: "Mon",
1: "Tues",
2: "Wed",
3: "Thor's",
4: "Fri",
5: "Sat",
6: "Sun",
7: "Mon",
}
)
if long:
return df_wide.reset_index()
else:
return df_wide
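# Minimal usage sketch for the helper above (kept as a comment so nothing runs at import time;
# the two-week horizon is just an illustrative choice):
# wk = load_weekdays(long=False, categorical=True, periods=14)
# wk.tail()  # pandas Series of day-of-week labels indexed by date, ending today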
def load_live_daily(
long: bool = False,
fred_key: str = None,
fred_series: list = ["DGS10", "T5YIE", "SP500", "DCOILWTICO", "DEXUSEU"],
tickers: list = ["MSFT"],
trends_list: list = ["forecasting", "cycling", "cpu", "microsoft"],
weather_data_types: list = ["AWND", "WSF2", "TAVG"],
weather_stations: list = ["USW00094846", "USW00014925"],
weather_years: int = 10,
london_air_stations: list = ['CT3', 'SK8'],
london_air_species: str = "PM25",
london_air_days: int = 180,
earthquake_days: int = 180,
earthquake_min_magnitude: int = 5,
):
"""Generates a dataframe of data up to the present day.
Args:
long (bool): whether to return in long format or wide
fred_key (str): https://fred.stlouisfed.org/docs/api/api_key.html
fred_series (list): list of FRED series IDs. This requires fredapi package
tickers (list): list of stock tickers, requires yfinance
trends_list (list): list of search keywords, requires pytrends. None to skip.
weather_data_types (list): from NCEI NOAA api data types, GHCN Daily Weather Elements
PRCP, SNOW, TMAX, TMIN, TAVG, AWND, WSF1, WSF2, WSF5, WSFG
weather_stations (list): from NCEI NOAA api station ids. Pass empty list to skip.
weather_years (int): number of years of weather history to request.
london_air_stations (list): londonair.org.uk source station IDs. Pass empty list to skip.
london_air_species (str): what measurement to pull from London Air. Not all stations have all metrics.
london_air_days (int): number of days of London Air history to request.
earthquake_days (int): number of days of earthquake history to request.
earthquake_min_magnitude (int): smallest earthquake magnitude to pull from earthquake.usgs.gov. Set None to skip this.
"""
dataset_lists = []
current_date = datetime.datetime.utcnow()
try:
if fred_key is not None:
from fredapi import Fred # noqa
from autots.datasets.fred import get_fred_data
fred_df = get_fred_data(fred_key, fred_series, long=False)
fred_df.index = fred_df.index.tz_localize(None)
dataset_lists.append(fred_df)
except ModuleNotFoundError:
print("pip install fredapi (and you'll also need an api key)")
except Exception as e:
print(f"FRED data failed: {repr(e)}")
for ticker in tickers:
try:
import yfinance as yf
msft = yf.Ticker(ticker)
# get historical market data
msft_hist = msft.history(period="max")
msft_hist = msft_hist.rename(columns=lambda x: x.lower().replace(" ", "_"))
msft_hist = msft_hist.rename(columns=lambda x: ticker.lower() + "_" + x)
try:
msft_hist.index = msft_hist.index.tz_localize(None)
except Exception:
pass
dataset_lists.append(msft_hist)
time.sleep(1)
except ModuleNotFoundError:
print("You need to: pip install yfinance")
except Exception as e:
print(f"yfinance data failed: {repr(e)}")
str_end_time = current_date.strftime("%Y-%m-%d")
start_date = (current_date - datetime.timedelta(days=360 * weather_years)).strftime(
"%Y-%m-%d"
)
for wstation in weather_stations:
try:
wbase = "https://www.ncei.noaa.gov/access/services/data/v1/?dataset=daily-summaries"
wargs = f"&dataTypes={','.join(weather_data_types)}&stations={wstation}"
wargs = (
wargs
+ f"&startDate={start_date}&endDate={str_end_time}&boundingBox=90,-180,-90,180&units=standard&format=csv"
)
wdf = pd.read_csv(wbase + wargs)
wdf['DATE'] = pd.to_datetime(wdf['DATE'], infer_datetime_format=True)
wdf = wdf.set_index('DATE').drop(columns=['STATION'])
wdf.rename(columns=lambda x: wstation + "_" + x, inplace=True)
dataset_lists.append(wdf)
time.sleep(1)
except Exception as e:
print(f"weather data failed: {repr(e)}")
str_end_time = current_date.strftime("%d-%b-%Y")
start_date = (current_date - datetime.timedelta(days=london_air_days)).strftime(
"%d-%b-%Y"
)
for asite in london_air_stations:
try:
# abase = "http://api.erg.ic.ac.uk/AirQuality/Data/Site/Wide/"
# aargs = "SiteCode=CT8/StartDate=2021-07-01/EndDate=2021-07-30/csv"
abase = 'https://www.londonair.org.uk/london/asp/downloadsite.asp'
aargs = f"?site={asite}&species1={london_air_species}m&species2=&species3=&species4=&species5=&species6=&start={start_date}&end={str_end_time}&res=6&period=daily&units=ugm3"
s = requests.get(abase + aargs).content
adf = pd.read_csv(io.StringIO(s.decode('utf-8')))
acol = adf['Site'].iloc[0] + "_" + adf['Species'].iloc[0]
adf['Datetime'] = pd.to_datetime(adf['ReadingDateTime'], dayfirst=True)
adf[acol] = adf['Value']
dataset_lists.append(adf[['Datetime', acol]].set_index("Datetime"))
time.sleep(1)
# "/Data/Traffic/Site/SiteCode={SiteCode}/StartDate={StartDate}/EndDate={EndDate}/Json"
except Exception as e:
print(f"London Air data failed: {repr(e)}")
if earthquake_min_magnitude is not None:
try:
str_end_time = current_date.strftime("%Y-%m-%d")
start_date = (
current_date - datetime.timedelta(days=earthquake_days)
).strftime("%Y-%m-%d")
# is limited to ~1000 rows of data, ie individual earthquakes
ebase = "https://earthquake.usgs.gov/fdsnws/event/1/query?"
eargs = f"format=csv&starttime={start_date}&endtime={str_end_time}&minmagnitude={earthquake_min_magnitude}"
eq = pd.read_csv(ebase + eargs)
eq["time"] = pd.to_datetime(eq["time"], infer_datetime_format=True)
eq["time"] = eq["time"].dt.tz_localize(None)
eq.set_index("time", inplace=True)
global_earthquakes = eq.resample("1D").agg(
{"mag": "mean", "depth": "count"}
)
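# note: as written, this aggregates the daily *mean* magnitude plus a count of qualifying
# quakes, even though the magnitude column is renamed 'largest_magnitude_earthquake' below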
global_earthquakes["mag"] = global_earthquakes["mag"].fillna(
earthquake_min_magnitude
)
global_earthquakes = global_earthquakes.rename(
columns={
"mag": "largest_magnitude_earthquake",
"depth": "count_large_earthquakes",
}
)
dataset_lists.append(global_earthquakes)
except Exception as e:
print(f"earthquake data failed: {repr(e)}")
if trends_list is not None:
try:
from pytrends.request import TrendReq
pytrends = TrendReq(hl="en-US", tz=360)
# pytrends.build_payload(kw_list, cat=0, timeframe='today 5-y', geo='', gprop='')
pytrends.build_payload(trends_list, timeframe="all")
gtrends = pytrends.interest_over_time()
gtrends.index = gtrends.index.tz_localize(None)
gtrends.drop(columns="isPartial", inplace=True, errors="ignore")
dataset_lists.append(gtrends)
except ImportError:
print("You need to: pip install pytrends")
except Exception as e:
print(f"pytrends data failed: {repr(e)}")
if len(dataset_lists) < 1:
raise ValueError("No data successfully downloaded!")
elif len(dataset_lists) == 1:
df = dataset_lists[0]
else:
from functools import reduce
df = reduce(
lambda x, y:
|
pd.merge(x, y, left_index=True, right_index=True, how="outer")
|
pandas.merge
|
import pandas as pd
import ast
from dateutil import *
import json
from Package.reader import Reader
from datetime import datetime, date, time
ALL_INDEX = False
ALL_GENERAL = False
class InstagramConnectionReader(Reader):
def get_and_concat_dataframe(self, data):
'''
This function performs the following operations to transform the raw data into a dataframe:
--> the data are gathered as str(dictionaries) (i.e. following, blocked_users, etc.) into a list
--> ast.literal_eval parses each string back into a dictionary
--> the keys collected from this dictionary become the labels in our dataframe
--> for each dictionary we collect the date and the user name into two lists
--> from these two lists we create a DataFrame (adding the 'type' and 'label' columns)
--> finally we concatenate all the dataframes obtained
'''
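# Illustrative sketch of the parsing step described above (hypothetical data):
#   ast.literal_eval("{'following': {'some_user': '2020-01-01'}}")
#   -> {'following': {'some_user': '2020-01-01'}}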
list_dict = []
for niv1 in data :
for niv2 in niv1 :
list_dict.append(niv2)
string_dict = list_dict[0]
dictionary = ast.literal_eval(string_dict)
list_index = []
for key, value in dictionary.items():
list_index.append(key)
j = -1
for i in list_index:
j = j+1
label = str(i)
values = dictionary.get(i)
if bool(values) == True:
list_name = []
list_date = []
for key, value in values.items():
list_name.append(key)
list_date.append(value)
df = pd.DataFrame(list(zip(list_date,list_name)),columns=['date','name'])
df['type']=df.name.apply(lambda x: 'Instagram')
df['label']=df.name.apply(lambda x: label )
if j == 0 :
main_df = df
else :
main_df =
|
pd.concat([main_df,df])
|
pandas.concat
|
import tensorflow as tf
import numpy as np
import scipy.io as sio
import pandas as pd
import os
import csv
import re
from feature_encoding import *
from keras.models import load_model
from keras.utils import to_categorical
import Efficient_CapsNet_sORF150
import Efficient_CapsNet_sORF250
import lightgbm as lgb
from sklearn.metrics import roc_auc_score
import sys
from optparse import OptionParser
##read Fasta sequence
def readFasta(file):
if os.path.exists(file) == False:
print('Error: "' + file + '" does not exist.')
sys.exit(1)
with open(file) as f:
records = f.read()
if re.search('>', records) == None:
print('The input file seems not in fasta format.')
sys.exit(1)
records = records.split('>')[1:]
myFasta = []
for fasta in records:
array = fasta.split('\n')
name, sequence = array[0].split()[0], re.sub('[^ARNDCQEGHILKMFPSTWYV-]', '-', ''.join(array[1:]).upper())
myFasta.append([name, sequence])
return myFasta
##extract sORF sequence
def get_sORF(fastas):
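# Scan each sequence for ATG start codons and the first in-frame stop codon (TAA/TAG/TGA),
# keeping candidate sORFs whose length is a multiple of 3 and between 12 and 303 nt;
# sequences that already form a complete short ORF are kept as-is.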
sORF_seq = []
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
g = 0
if len(seq) > 303:
for j in range(len(seq)-2):
seg_start = seq[j:j+3]
if seg_start == 'ATG':
for k in range(j+3, len(seq)-2, 3):
seg_end = seq[k:k+3]
if seg_end == 'TAA':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
if seg_end == 'TAG':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
if seg_end == 'TGA':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
elif len(seq) <= 303 and np.mod(len(seq), 3) != 0:
for j in range(len(seq)-2):
seg_start = seq[j:j+3]
if seg_start == 'ATG':
for k in range(j+3, len(seq)-2, 3):
seg_end = seq[k:k+3]
if seg_end == 'TAA':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
if seg_end == 'TAG':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
if seg_end == 'TGA':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
elif seq[0:3] == 'ATG' and seq[len(seq)-3:len(seq)] == 'TAA' and np.mod(len(seq), 3) == 0 and 12 <= len(seq) <= 303:
sORF_seq.append([name, seq])
elif seq[0:3] == 'ATG' and seq[len(seq)-3:len(seq)] == 'TAG' and np.mod(len(seq), 3) == 0 and 12 <= len(seq) <= 303:
sORF_seq.append([name, seq])
elif seq[0:3] == 'ATG' and seq[len(seq)-3:len(seq)] == 'TGA' and np.mod(len(seq), 3) == 0 and 12 <= len(seq) <= 303:
sORF_seq.append([name, seq])
return sORF_seq
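# Hedged usage sketch of how these helpers chain together (mirrors test_model below;
# 'input.fasta' and datapath are hypothetical):
#   fastas = readFasta('input.fasta')
#   sorfs = get_sORF(fastas)
#   proteins = get_protein(sorfs)
#   features = feature_encode(datapath, sorfs, proteins, 'H.sapiens', 'CDS')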
##get protein sequence
def get_protein(fastas):
protein_seq=[]
start_codon = 'ATG'
codon_table = {
'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
'TAC': 'Y', 'TAT': 'Y', 'TAA': '', 'TAG': '',
'TGC': 'C', 'TGT': 'C', 'TGA': '', 'TGG': 'W'}
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
start_site = re.search(start_codon, seq)
protein = ''
for site in range(start_site.start(), len(seq), 3):
protein = protein + codon_table[seq[site:site+3]]
protein_name = '>Micropeptide_' + name
protein_seq.append([protein_name, protein])
return protein_seq
##extract features
def feature_encode(datapath, dna_seq, protein_seq, s_type, d_type):
if s_type == 'H.sapiens':
if d_type == 'CDS':
c_m = pd.read_csv(datapath + 'human_cds_trainp_6mermean.csv', header=None, delimiter=',')
nc_m = pd.read_csv(datapath + 'human_cds_trainn_6mermean.csv', header=None, delimiter=',')
Tc_pos1 = pd.read_csv(datapath + 'human_cds_trainp_framed_3mer_1.csv', header=None, delimiter=',')
Tc_neg1 = pd.read_csv(datapath + 'human_cds_trainn_framed_3mer_1.csv', header=None, delimiter=',')
Tc_pos2 = pd.read_csv(datapath + 'human_cds_trainp_framed_3mer_2.csv', header=None, delimiter=',')
Tc_neg2 = pd.read_csv(datapath + 'human_cds_trainn_framed_3mer_2.csv', header=None, delimiter=',')
Tc_pos3 = pd.read_csv(datapath + 'human_cds_trainp_framed_3mer_3.csv', header=None, delimiter=',')
Tc_neg3 = pd.read_csv(datapath + 'human_cds_trainn_framed_3mer_3.csv', header=None, delimiter=',')
fea1_1 = np.array(ratio_ORFlength_hcds(dna_seq))
dna_fea = np.array(extract_DNAfeatures(dna_seq, c_m, nc_m, Tc_pos1, Tc_neg1, Tc_pos2, Tc_neg2, Tc_pos3, Tc_neg3, fea1_1))
elif d_type == 'non-CDS':
c_m = pd.read_csv(datapath + 'human_noncds_trainp_6mermean.csv', header=None, delimiter=',')
nc_m = pd.read_csv(datapath + 'human_noncds_trainn_6mermean.csv', header=None, delimiter=',')
Tc_pos1 = pd.read_csv(datapath + 'human_noncds_trainp_framed_3mer_1.csv', header=None, delimiter=',')
Tc_neg1 = pd.read_csv(datapath + 'human_noncds_trainn_framed_3mer_1.csv', header=None, delimiter=',')
Tc_pos2 = pd.read_csv(datapath + 'human_noncds_trainp_framed_3mer_2.csv', header=None, delimiter=',')
Tc_neg2 = pd.read_csv(datapath + 'human_noncds_trainn_framed_3mer_2.csv', header=None, delimiter=',')
Tc_pos3 = pd.read_csv(datapath + 'human_noncds_trainp_framed_3mer_3.csv', header=None, delimiter=',')
Tc_neg3 = pd.read_csv(datapath + 'human_noncds_trainn_framed_3mer_3.csv', header=None, delimiter=',')
fea1_1 = np.array(ratio_ORFlength_hnoncds(dna_seq))
dna_fea = np.array(extract_DNAfeatures(dna_seq, c_m, nc_m, Tc_pos1, Tc_neg1, Tc_pos2, Tc_neg2, Tc_pos3, Tc_neg3, fea1_1))
else:
print("Type error")
elif s_type == 'M.musculus':
if d_type == 'CDS':
c_m = pd.read_csv(datapath + 'mouse_cds_trainp_6mermean.csv', header=None, delimiter=',')
nc_m = pd.read_csv(datapath + 'mouse_cds_trainn_6mermean.csv', header=None, delimiter=',')
Tc_pos1 = pd.read_csv(datapath + 'mouse_cds_trainp_framed_3mer_1.csv', header=None, delimiter=',')
Tc_neg1 = pd.read_csv(datapath + 'mouse_cds_trainn_framed_3mer_1.csv', header=None, delimiter=',')
Tc_pos2 = pd.read_csv(datapath + 'mouse_cds_trainp_framed_3mer_2.csv', header=None, delimiter=',')
Tc_neg2 = pd.read_csv(datapath + 'mouse_cds_trainn_framed_3mer_2.csv', header=None, delimiter=',')
Tc_pos3 = pd.read_csv(datapath + 'mouse_cds_trainp_framed_3mer_3.csv', header=None, delimiter=',')
Tc_neg3 = pd.read_csv(datapath + 'mouse_cds_trainn_framed_3mer_3.csv', header=None, delimiter=',')
fea1_1 = np.array(ratio_ORFlength_mcds(dna_seq))
dna_fea = np.array(extract_DNAfeatures(dna_seq, c_m, nc_m, Tc_pos1, Tc_neg1, Tc_pos2, Tc_neg2, Tc_pos3, Tc_neg3, fea1_1))
elif d_type == 'non-CDS':
c_m = pd.read_csv(datapath + 'mouse_noncds_trainp_6mermean.csv', header=None, delimiter=',')
nc_m = pd.read_csv(datapath + 'mouse_noncds_trainn_6mermean.csv', header=None, delimiter=',')
Tc_pos1 = pd.read_csv(datapath + 'mouse_noncds_trainp_framed_3mer_1.csv', header=None, delimiter=',')
Tc_neg1 = pd.read_csv(datapath + 'mouse_noncds_trainn_framed_3mer_1.csv', header=None, delimiter=',')
Tc_pos2 = pd.read_csv(datapath + 'mouse_noncds_trainp_framed_3mer_2.csv', header=None, delimiter=',')
Tc_neg2 = pd.read_csv(datapath + 'mouse_noncds_trainn_framed_3mer_2.csv', header=None, delimiter=',')
Tc_pos3 = pd.read_csv(datapath + 'mouse_noncds_trainp_framed_3mer_3.csv', header=None, delimiter=',')
Tc_neg3 = pd.read_csv(datapath + 'mouse_noncds_trainn_framed_3mer_3.csv', header=None, delimiter=',')
fea1_1 = np.array(ratio_ORFlength_mnoncds(dna_seq))
dna_fea = np.array(extract_DNAfeatures(dna_seq, c_m, nc_m, Tc_pos1, Tc_neg1, Tc_pos2, Tc_neg2, Tc_pos3, Tc_neg3, fea1_1))
else:
print("Type error")
elif s_type == 'D.melanogaster':
if d_type == 'CDS':
c_m = pd.read_csv(datapath + 'fruitfly_cds_trainp_6mermean.csv', header=None, delimiter=',')
nc_m = pd.read_csv(datapath + 'fruitfly_cds_trainn_6mermean.csv', header=None, delimiter=',')
Tc_pos1 = pd.read_csv(datapath + 'fruitfly_cds_trainp_framed_3mer_1.csv', header=None, delimiter=',')
Tc_neg1 = pd.read_csv(datapath + 'fruitfly_cds_trainn_framed_3mer_1.csv', header=None, delimiter=',')
Tc_pos2 = pd.read_csv(datapath + 'fruitfly_cds_trainp_framed_3mer_2.csv', header=None, delimiter=',')
Tc_neg2 = pd.read_csv(datapath + 'fruitfly_cds_trainn_framed_3mer_2.csv', header=None, delimiter=',')
Tc_pos3 = pd.read_csv(datapath + 'fruitfly_cds_trainp_framed_3mer_3.csv', header=None, delimiter=',')
Tc_neg3 = pd.read_csv(datapath + 'fruitfly_cds_trainn_framed_3mer_3.csv', header=None, delimiter=',')
fea1_1 = np.array(ratio_ORFlength_fcds(dna_seq))
dna_fea = np.array(extract_DNAfeatures(dna_seq, c_m, nc_m, Tc_pos1, Tc_neg1, Tc_pos2, Tc_neg2, Tc_pos3, Tc_neg3, fea1_1))
elif d_type == 'non-CDS':
c_m = pd.read_csv(datapath + 'fruitfly_noncds_trainp_6mermean.csv', header=None, delimiter=',')
nc_m = pd.read_csv(datapath + 'fruitfly_noncds_trainn_6mermean.csv', header=None, delimiter=',')
Tc_pos1 = pd.read_csv(datapath + 'fruitfly_noncds_trainp_framed_3mer_1.csv', header=None, delimiter=',')
Tc_neg1 = pd.read_csv(datapath + 'fruitfly_noncds_trainn_framed_3mer_1.csv', header=None, delimiter=',')
Tc_pos2 = pd.read_csv(datapath + 'fruitfly_noncds_trainp_framed_3mer_2.csv', header=None, delimiter=',')
Tc_neg2 = pd.read_csv(datapath + 'fruitfly_noncds_trainn_framed_3mer_2.csv', header=None, delimiter=',')
Tc_pos3 = pd.read_csv(datapath + 'fruitfly_noncds_trainp_framed_3mer_3.csv', header=None, delimiter=',')
Tc_neg3 = pd.read_csv(datapath + 'fruitfly_noncds_trainn_framed_3mer_3.csv', header=None, delimiter=',')
fea1_1 = np.array(ratio_ORFlength_fnoncds(dna_seq))
dna_fea = np.array(extract_DNAfeatures(dna_seq, c_m, nc_m, Tc_pos1, Tc_neg1, Tc_pos2, Tc_neg2, Tc_pos3, Tc_neg3, fea1_1))
else:
print("Type error")
else:
print("Species error")
protein_fea = np.array(extract_Proteinfeatures(protein_seq))
fea = np.concatenate((dna_fea, protein_fea), axis=1)
filename='feature_name.csv'
with open(filename) as f:
reader = csv.reader(f)
header_row = next(reader)
features = pd.DataFrame(fea[1:, 1:], columns=header_row)
return features
##test model
def test_model(datapath,outpath,datafile,s_type,d_type):
test_sequence = readFasta(datapath + datafile)
sORF_seq = get_sORF(test_sequence)
protein_seq = get_protein(sORF_seq)
fea = feature_encode(datapath, sORF_seq, protein_seq, s_type, d_type)
if s_type=='H.sapiens':
if d_type=='CDS':
trainX = pd.read_csv(datapath + 'human_cds_trainX_feature150.csv').values
trainX_ = trainX.reshape([14464, 10, 15, 1])
y1 = np.array([1] * 7232 + [0] * 7232)
##LightGBM
lgbClf = lgb.LGBMClassifier(boosting_type='gbdt', learning_rate=0.01, n_estimators=400,
max_depth=5, num_leaves=57, max_bin=99, min_data_in_leaf=66,
bagging_fraction=0.8, feature_fraction=0.7,
lambda_l1=0.2604, lambda_l2=0.7363)
lgbClf.fit(trainX, y1)
##Efficient-CapsNet
model = Efficient_CapsNet_sORF150.build_sORF(trainX_.shape[1:], mode='test', verbose=True)
feature_importance = pd.read_csv(datapath + 'human_cds_featureRank_lightgbm.csv')
selector = feature_importance.iloc[0:150, 0].tolist()
fea_sel = fea.loc[:, selector]
fea_sel.to_csv(outpath + 'human_cds_testX_feature150.csv', index=False)
testX = pd.read_csv(outpath + 'human_cds_testX_feature150.csv').values
testX_ = testX.reshape([len(sORF_seq), 10, 15, 1])
lgb_proba_testlabel = lgbClf.predict_proba(testX)
lgb_prob = lgb_proba_testlabel[:, 1]
model_name='human_cds.h5'
elif d_type=='non-CDS':
trainX = pd.read_csv(datapath + 'human_noncds_trainX_feature250.csv').values
trainX_ = trainX.reshape([15500, 10, 25, 1])
y1 = np.array([1] * 7750 + [0] * 7750)
##LightGBM
lgbClf = lgb.LGBMClassifier(boosting_type='gbdt', learning_rate=0.05, n_estimators=300,
max_depth=5, num_leaves=30, max_bin=138, min_data_in_leaf=121,
bagging_fraction=0.9, feature_fraction=0.6,
lambda_l1=0.6399, lambda_l2=0.4156)
lgbClf.fit(trainX, y1)
##Efficient-CapsNet
model = Efficient_CapsNet_sORF250.build_sORF(trainX_.shape[1:], mode='test', verbose=True)
feature_importance = pd.read_csv(datapath + 'human_noncds_featureRank_lightgbm.csv')
selector = feature_importance.iloc[0:250, 0].tolist()
fea_sel = fea.loc[:, selector]
fea_sel.to_csv(outpath + 'human_noncds_testX_feature250.csv', index=False)
testX = pd.read_csv(outpath + 'human_noncds_testX_feature250.csv').values
testX_ = testX.reshape([len(sORF_seq), 10, 25, 1])
lgb_proba_testlabel = lgbClf.predict_proba(testX)
lgb_prob = lgb_proba_testlabel[:, 1]
model_name = 'human_noncds.h5'
else:
print ("Type error")
elif s_type=='M.musculus':
if d_type=='CDS':
trainX = pd.read_csv(datapath + 'mouse_cds_trainX_feature150.csv').values
trainX_ = trainX.reshape([5230, 10, 15, 1])
y1 = np.array([1] * 2615 + [0] * 2615)
##LightGBM
lgbClf = lgb.LGBMClassifier(boosting_type='gbdt', learning_rate=0.1, n_estimators=100,
max_depth=7, num_leaves=41, max_bin=224, min_data_in_leaf=93,
bagging_fraction=0.9, feature_fraction=0.6,
lambda_l1=0.4224, lambda_l2=0.2594)
lgbClf.fit(trainX, y1)
##Efficient-CapsNet
model = Efficient_CapsNet_sORF150.build_sORF(trainX_.shape[1:], mode='test', verbose=True)
feature_importance = pd.read_csv(datapath + 'mouse_cds_featureRank_lightgbm.csv')
selector = feature_importance.iloc[0:150, 0].tolist()
fea_sel = fea.loc[:, selector]
fea_sel.to_csv(outpath + 'mouse_cds_testX_feature150.csv', index=False)
testX = pd.read_csv(outpath + 'mouse_cds_testX_feature150.csv').values
testX_ = testX.reshape([len(sORF_seq), 10, 15, 1])
lgb_proba_testlabel = lgbClf.predict_proba(testX)
lgb_prob = lgb_proba_testlabel[:, 1]
model_name='mouse_cds.h5'
elif d_type=='non-CDS':
trainX = pd.read_csv(datapath + 'mouse_noncds_trainX_feature250.csv').values
trainX_ = trainX.reshape([6132, 10, 25, 1])
y1 = np.array([1] * 3066 + [0] * 3066)
##LightGBM
lgbClf = lgb.LGBMClassifier(boosting_type='gbdt', learning_rate=0.05, n_estimators=300,
max_depth=4, num_leaves=15, max_bin=17, min_data_in_leaf=75,
bagging_fraction=0.5, feature_fraction=0.7,
lambda_l1=0.9026, lambda_l2=0.6507)
lgbClf.fit(trainX, y1)
##Efficient-CapsNet
model = Efficient_CapsNet_sORF250.build_sORF(trainX_.shape[1:], mode='test', verbose=True)
feature_importance = pd.read_csv(datapath + 'mouse_noncds_featureRank_lightgbm.csv')
selector = feature_importance.iloc[0:250, 0].tolist()
fea_sel = fea.loc[:, selector]
fea_sel.to_csv(outpath + 'mouse_noncds_testX_feature250.csv', index=False)
testX = pd.read_csv(outpath + 'mouse_noncds_testX_feature250.csv').values
testX_ = testX.reshape([len(sORF_seq), 10, 25, 1])
lgb_proba_testlabel = lgbClf.predict_proba(testX)
lgb_prob = lgb_proba_testlabel[:, 1]
model_name = 'mouse_noncds.h5'
else:
print ("Type error")
elif s_type=='D.melanogaster':
if d_type=='CDS':
trainX = pd.read_csv(datapath + 'fruitfly_cds_trainX_feature150.csv').values
trainX_ = trainX.reshape([1364, 10, 15, 1])
y1 = np.array([1] * 682 + [0] * 682)
##LightGBM
lgbClf = lgb.LGBMClassifier(boosting_type='gbdt', learning_rate=0.1, n_estimators=100,
max_depth=5, num_leaves=25, max_bin=48, min_data_in_leaf=94,
bagging_fraction=0.5, feature_fraction=0.8,
lambda_l1=0.3255, lambda_l2=0.5864)
lgbClf.fit(trainX, y1)
##Efficient-CapsNet
model = Efficient_CapsNet_sORF150.build_sORF(trainX_.shape[1:], mode='test', verbose=True)
feature_importance = pd.read_csv(datapath + 'fruitfly_cds_featureRank_lightgbm.csv')
selector = feature_importance.iloc[0:150, 0].tolist()
fea_sel = fea.loc[:, selector]
fea_sel.to_csv(outpath + 'fruitfly_cds_testX_feature150.csv', index=False)
testX = pd.read_csv(outpath + 'fruitfly_cds_testX_feature150.csv').values
testX_ = testX.reshape([len(sORF_seq), 10, 15, 1])
lgb_proba_testlabel = lgbClf.predict_proba(testX)
lgb_prob = lgb_proba_testlabel[:, 1]
model_name='fruitfly_cds.h5'
elif d_type=='non-CDS':
trainX = pd.read_csv(datapath + 'fruitfly_noncds_trainX_feature150.csv').values
trainX_ = trainX.reshape([13956, 10, 15, 1])
y1 = np.array([1] * 6978 + [0] * 6978)
##LightGBM
lgbClf = lgb.LGBMClassifier(boosting_type='gbdt', learning_rate=0.05, n_estimators=300,
max_depth=5, num_leaves=30, max_bin=225, min_data_in_leaf=129,
bagging_fraction=0.9, feature_fraction=0.9,
lambda_l1=0.2516, lambda_l2=0.0407)
lgbClf.fit(trainX, y1)
##Efficient-CapsNet
model = Efficient_CapsNet_sORF150.build_sORF(trainX_.shape[1:], mode='test', verbose=True)
feature_importance = pd.read_csv(datapath + 'fruitfly_noncds_featureRank_lightgbm.csv')
selector = feature_importance.iloc[0:150, 0].tolist()
fea_sel = fea.loc[:, selector]
fea_sel.to_csv(outpath + 'fruitfly_noncds_testX_feature150.csv', index=False)
testX =
|
pd.read_csv(outpath + 'fruitfly_noncds_testX_feature150.csv')
|
pandas.read_csv
|
import pandas as pd
import matplotlib.pyplot as plt
def seed_count_time_scatter(data, title):
"""
Produces a scatter plot based on the seeds visited (per crawl) and time.
:param data: array
:param title: str
:return: None
"""
data = pd.DataFrame(data, columns=['Crawl', 'Elapsed Time (s)', 'Seeds Visited (per crawl)'])
data.plot.scatter(y='Elapsed Time (s)',
x='Seeds Visited (per crawl)')
plt.title(title)
def request_count_time_scatter(data, title):
"""
Produces a scatter plot based on the number of links visited per crawl and time.
:param data: array
:param title: str
:return: None
"""
data = pd.DataFrame(data, columns=['Crawl', 'Elapsed Time (s)', 'Total Links Visited (per crawl)'])
data.plot.scatter(y='Elapsed Time (s)',
x='Total Links Visited (per crawl)')
plt.title(title)
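# Hedged usage sketch (synthetic rows: [crawl id, elapsed seconds, seeds visited]):
#   rows = [[1, 12.5, 3], [2, 20.1, 5]]
#   seed_count_time_scatter(rows, "Seeds vs elapsed time")
#   plt.show()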
def seed_count_time_scatter_3d(data, title):
"""
Produces a three-dimensional scatter plot of seeds and links visited per crawl against time.
The third dimension (time) is represented by colour.
:param data: array
:param title: str
:return: None
"""
data =
|
pd.DataFrame(data, columns=['Crawl', 'Elapsed Time (s)', 'Seeds Visited (per crawl)', 'Total Links Visited (per crawl)'])
|
pandas.DataFrame
|
"""Store the data in a nice big dataframe"""
import sys
from datetime import datetime, timedelta
import pandas as pd
import geopandas as gpd
import numpy as np
class Combine:
"""Combine defined countries together"""
THE_EU = [ 'Austria', 'Italy', 'Belgium', 'Latvia',
'Bulgaria', 'Lithuania', 'Croatia',
'Luxembourg', 'Cyprus', 'Malta',
'Czechia', 'Netherlands', 'Denmark',
'Poland', 'Estonia', 'Portugal',
'Finland', 'Romania', 'France',
'Slovakia', 'Germany', 'Slovenia',
'Greece', 'Spain', 'Hungary',
'Sweden', 'Ireland' ]
def __init__(self, options):
"""Init"""
self.options = options
self.timeseries = []
self.countries = None
self.description = None
self.merged = None
self.cc = None
self.populations = []
self.national_populations = None
self.get_populations()
self.countries_long = {'nl': 'The Netherlands', 'sco': 'Scotland', 'eng': 'England',
'wal': 'Wales', 'ni': 'Northern Ireland'}
self.jhu = JHU(self)
def judat(self):
"""Dumb helper for another library"""
self.timeseries.append(NLTimeseries(False).national(False))
self.combine_national(False)
#self.merged['Week'] = self.merged.index.strftime('%U')
#self.merged = self.merged.groupby(['Week']) \
#.agg({'Aantal': 'sum'})
print(self.merged)
def process(self):
"""Do it"""
cumulative = False
if self.options.pivot:
cumulative = True
for nation in self.cc:
usejhu = True
if self.options.nation:
print(f'Processing National data {nation}')
if nation in ['wal', 'sco', 'eng']:
self.timeseries.append(UKTimeseries(False).national(nation,cumulative))
usejhu = False
#if nation == 'nl':
#self.timeseries.append(NLTimeseries(False).national(cumulative))
#usejhu = False
if usejhu:
self.timeseries.append(XXTimeseries(False,
{nation: self.countries_long[nation]}).national(cumulative))
else:
print(f'Processing combined data {nation}')
if nation in ['wal', 'sco', 'eng']:
self.timeseries.append(UKTimeseries(True).get_data())
usejhu = False
if nation == 'nl':
self.timeseries.append(NLTimeseries(True).get_data())
usejhu = False
if usejhu:
self.timeseries.append(XXTimeseries(True).get_data())
if len(self.timeseries) == 0:
print('No country Data to process')
sys.exit()
if self.options.pivot:
self.combine_pivot()
return
if self.options.nation:
self.combine_national()
return
self.get_combined_data()
def combine_pivot(self):
"""Pivot data for pandas_alive"""
print('Pivoting data')
self.merged = pd.concat(self.timeseries)
self.merged['Datum'] = pd.to_datetime(self.merged['Datum'])
# So we can add it as an option later
column = 'Overleden'
#column = 'Aantal'
# Populations are stored in millions; multiply by 10 to express them per 100k
for country in self.cc:
self.merged.loc[(self.merged.country == country), 'population'] \
= self.national_populations[country] * 10
# Per-Capita
self.merged[column] = self.merged[column] / self.merged['population']
self.merged = self.merged.pivot(index='Datum',
columns='country',
values=column).fillna(0)
self.trim_data()
print(self.merged)
def combine_national(self, trim=True):
"""Combine national totals"""
self.merged = pd.concat(self.timeseries)
self.merged['Datum'] = pd.to_datetime(self.merged['Datum'])
self.merged = self.merged.set_index('Datum')
self.merged.sort_index(inplace=True)
for country in self.cc:
self.merged.loc[(self.merged.country == country), 'population'] \
= self.national_populations[country] * 10
self.merged.loc[(self.merged.country == country), 'cname'] \
= self.countries_long[country]
for column in ['Aantal', 'Ziekenhuisopname', 'Overleden']:
if column not in self.merged.columns:
continue
pgpd = f"{column}-gpd"
radaily = f"{column}-radaily"
raweekly = f"{column}-raweekly"
ranonpc = f"{column}-ranonpc"
self.merged[pgpd] = self.merged[column] / self.merged['population']
self.merged[radaily] = self.merged.groupby('country',
sort=False)[pgpd] \
.transform(lambda x: x.rolling(7, 1).mean())
self.merged[raweekly] = self.merged.groupby('country',
sort=False)[pgpd] \
.transform(lambda x: x.rolling(7).sum())
self.merged[ranonpc] = self.merged.groupby('country',
sort=False)[column] \
.transform(lambda x: x.rolling(7).sum())
if(trim):
self.trim_data()
def trim_data(self):
if self.options.startdate is not None:
self.merged = self.merged.query(f'{self.options.startdate} <= Datum')
if self.options.enddate is not None:
self.merged = self.merged.query(f'Datum <= {self.options.enddate}')
def get(self):
"""Return the data set"""
return self.merged
def get_populations(self):
"""National populations for the calculations that need it"""
self.national_populations = pd.read_csv('data/populations.csv', delimiter=',',
index_col=0, header=None, squeeze=True).to_dict()
def get_max(self, column):
"""Max value in df"""
return self.merged[column].max()
def get_combined_data(self):
"""Get a single dataframe containing all countries we deal with
I did this so I could draw combined choropleths but that has proven
to be somewhat more challenging than I originally thought
"""
print('Calculating combined data')
dataframe = pd.concat(self.timeseries)
dataframe = dataframe.set_index('Datum')
dataframe = dataframe.sort_index()
dataframe['pop_pc'] = dataframe['population'] / 1e5
# Filter out countries we do not want
for country in self.countries:
dataframe = dataframe[~dataframe['country'].isin([country])]
# Finally create smoothed columns
dataframe['radaily'] = dataframe.groupby('Gemeentecode',
sort=False)['Aantal'] \
.transform(lambda x: x.rolling(7, 1).mean())
dataframe['weekly'] = dataframe.groupby('Gemeentecode',
sort=False)['Aantal'] \
.transform(lambda x: x.rolling(7).sum())
dataframe['radaily_pc'] = dataframe['radaily'] / dataframe['pop_pc']
dataframe['weekly_pc'] = dataframe['weekly'] / dataframe['pop_pc']
if self.options.startdate is not None:
dataframe = dataframe.query(f'{self.options.startdate} <= Datum')
if self.options.enddate is not None:
dataframe = dataframe.query(f'Datum <= {self.options.enddate}')
print('Finished calculating combined data')
self.merged = dataframe
def parse_countries(self, country_str):
"""Sort out country data"""
ret = []
if country_str is None:
country_list = self.countries_long.keys()
else:
country_list = country_str.split(',')
if 'eu' in country_list:
country_list.remove('eu')
country_list += self.THE_EU
print('Setting EU')
for country in country_list:
country = country.lower()
count = None
#if 'nether' in country:
#count = 'nl'
if 'scot' in country:
count = 'sco'
if 'eng' in country:
count = 'eng'
if 'wal' in country:
count = 'wal'
#if 'ni' in country:
# count = 'ni'
if count is not None:
ret.append(count)
else:
retcountry = self.jhu.get_country(country)
if retcountry:
ret.append(retcountry)
self.cc = ret
self.countries = list(set(self.countries_long.keys()) - set(ret))
self.description = '_'.join(ret)
def project_for_date(self, date):
"""Project infections per Gemeente and make league table"""
if date is None:
date = self.merged.index.max().strftime('%Y%m%d')
datemax = datetime.strptime(date, '%Y%m%d')
datemin = (datemax - timedelta(days=4)).strftime('%Y%m%d')
self.merged = self.merged.query(f'{datemin} <= Datum <= {date}')
self.merged = self.merged.groupby(['Gemeentecode']) \
.agg({'Aantal': 'sum', 'Gemeentenaam': 'first',
'pop_pc': 'first', 'population': 'first', 'country': 'first'})
self.merged['percapita'] = self.merged['Aantal'] / self.merged['pop_pc']
self.merged.sort_values(by=['percapita'], ascending=False, inplace=True)
class Timeseries:
"""Abstract class for timeseries"""
def __init__(self, process=True):
self.merged = None
self.cumulative = False
if process:
self.get_pop()
self.get_map()
self.get_source_data()
def get_data(self):
"""Pass back the data series"""
return self.merged
def get_source_data(self):
"""Placeholder"""
def get_pop(self):
"""Placeholder"""
def get_map(self):
"""Placeholder"""
def set_cumulative(self, value):
"""Daily or cumulative"""
self.cumulative = value
class JHU:
"""Get data from <NAME>"""
JHD = '../COVID-19/csse_covid_19_data'
def __init__(self, combined):
"""Init"""
self.dataframe = None
self.combined = combined
self.load()
def get_country(self, country):
"""Check Country is in JHU data"""
row = self.dataframe.loc[self.dataframe['Combined_Key'] == country]
if len(row) == 0:
return False
self.combined.countries_long[row['iso2'].values[0].lower()] = country
self.combined.national_populations[row['iso2'].values[0].lower()] \
= row['Population'].values[0]
return row['iso2'].values[0].lower()
def load(self):
"""Load JHU lookup table"""
dataframe = pd.read_csv(f'{self.JHD}/UID_ISO_FIPS_LookUp_Table.csv',
delimiter=',')
dataframe['Combined_Key'] = dataframe['Combined_Key'].str.lower()
dataframe['Population'] = dataframe['Population'] / 1e6
self.dataframe = dataframe
class XXTimeseries(Timeseries):
"""Generic JHU Data class"""
# TODO: Duplicated code
JHD = '../COVID-19/csse_covid_19_data'
def __init__(self, process=True, country=None):
"""Init"""
Timeseries.__init__(self, process)
print(country.keys())
self.countrycode = list(country.keys())[0]
self.country = country[self.countrycode]
self.cumulative = False
def national(self, cumulative):
"""Get columns"""
self.set_cumulative(cumulative)
timeseries = 'csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
overleden = self.calculate(timeseries, 'Overleden')
timeseries = 'csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
aantal = self.calculate(timeseries, 'Aantal')
aantal['Overleden'] = overleden['Overleden']
return aantal.assign(country=self.countrycode)
def calculate(self, timeseries, column):
"""Get national totals"""
file = f'{self.JHD}/{timeseries}'
dataframe = pd.read_csv(file, delimiter=',')
dataframe['Country/Region'] = dataframe['Country/Region'].str.lower()
row = dataframe.loc[dataframe['Country/Region'] == self.country]
row = row.loc[row['Province/State'].isnull()]
row = row.reset_index(drop=True)
row.drop(columns=['Province/State', 'Lat', 'Long'], inplace=True)
row.set_index('Country/Region', inplace=True)
dataframe = row.T
if not self.cumulative:
dataframe[column] = dataframe[self.country] - dataframe[self.country].shift(1)
else:
dataframe[column] = dataframe[self.country]
dataframe.drop(columns=[self.country], inplace=True)
dataframe.dropna(inplace=True)
dataframe = dataframe.reset_index()
dataframe.rename(columns={'index': 'Datum'}, inplace=True)
return dataframe
class BETimeseries(Timeseries):
"""Belgium data"""
def __init__(self, process=True):
"""Init"""
Timeseries.__init__(self, process)
def national(self, cumulative):
"""Get national totals"""
self.set_cumulative(cumulative)
dataframe = pd.read_csv('data/belgiumt.csv',
delimiter=',')
dataframe.dropna(inplace=True)
dataframe.rename(columns={'CASES': 'Aantal', 'DATE': 'Datum'}, inplace=True)
dataframe = dataframe.groupby(['Datum']).agg({'Aantal': 'sum'})
dataframe = dataframe.reset_index()
dataframe = dataframe.assign(country='be')
return dataframe
def get_source_data(self):
"""Get BE source data for infections"""
dataframe = pd.read_csv('data/belgium.csv', delimiter=',')
dataframe.dropna(inplace=True)
dataframe['CASES'] = dataframe['CASES'].replace(['<5'], '0')
dataframe.rename(columns={'CASES': 'Aantal',
'NIS5': 'Gemeentecode',
'DATE': 'Datum',
'TX_DESCR_NL': 'Gemeentenaam'}, inplace=True)
dataframe.drop(columns=['TX_DESCR_FR', 'TX_ADM_DSTR_DESCR_NL', 'TX_ADM_DSTR_DESCR_FR',
'PROVINCE', 'REGION'], inplace=True)
dataframe['Datum'] = pd.to_datetime(dataframe['Datum'])
dataframe = self.resample(dataframe)
dataframe = dataframe.set_index('Gemeentecode').dropna()
merged = dataframe.join(self.pop)
merged.index = merged.index.astype('int')
# merged.reset_index(inplace=True)
merged = merged.join(self.map)
merged.reset_index(inplace=True)
merged.rename(columns={'index': 'Gemeentecode'}, inplace=True)
merged = merged.assign(country='be')
self.merged = merged
def resample(self, dataframe):
"""Timeseries is incomplete, fill it in"""
# Normally you would just resample, but we have ~1500 gemeentes and each needs a
# complete data series or the choropleths will look very odd
idx = pd.date_range(min(dataframe.Datum), max(dataframe.Datum))
gems = list(set(dataframe['Gemeentecode'].values))
newdata = []
for gem in gems:
gemdf = dataframe[dataframe['Gemeentecode'] == gem]
#gemdf.set_index(gemdf.Datum, inplace=True)
default = self.get_row(gemdf.loc[gemdf['Gemeentecode'] == gem])
gemdf['strdate'] = gemdf['Datum'].dt.strftime('%Y-%m-%d')
for date in idx:
fdate = date.strftime('%Y-%m-%d')
if fdate not in gemdf['strdate'].values:
newdata.append({'Datum': date,
'Gemeentecode': default['Gemeentecode'],
'Gemeentenaam': default['Gemeentenaam'],
'Aantal': default['Aantal']
})
else:
row = gemdf.loc[gemdf['Datum'] == date]
newdata.append({'Datum': date,
'Gemeentecode': row['Gemeentecode'].values[0],
'Gemeentenaam': row['Gemeentenaam'].values[0],
'Aantal': int(row['Aantal'].values[0])
})
return pd.DataFrame(newdata)
def get_row(self, series):
"""Return one row"""
return {'Datum': series['Datum'].values[0],
'Gemeentecode': series['Gemeentecode'].values[0],
'Gemeentenaam': series['Gemeentenaam'].values[0],
'Aantal': series['Aantal'].values[0]
}
def get_pop(self):
"""Fetch the Population figures for BE"""
pop = pd.read_csv('data/bepop.csv', delimiter=',')
pop = pop.set_index('Gemeentecode')
self.pop = pop
def get_map(self):
"""Get BE map data"""
map_df = gpd.read_file('maps/BELGIUM_-_Municipalities.shp')
map_df.rename(columns={'CODE_INS': 'Gemeentecode',
'ADMUNADU': 'Gemeentenaam'}, inplace=True)
map_df['Gemeentecode'] = map_df['Gemeentecode'].astype('int')
map_df.drop(columns=['Gemeentenaam'], inplace=True)
map_df = map_df.set_index('Gemeentecode')
self.map = map_df
class NLTimeseries(Timeseries):
"""Dutch Timeseries"""
def __init__(self, process=True):
"""Init"""
Timeseries.__init__(self, process)
def national(self, cumulative):
"""Get national totals"""
self.set_cumulative(cumulative)
df1 = self.get_subtotal('Totaal', rename=False, cumulative=self.cumulative)
df2 = self.get_subtotal('Overleden', rename=True, cumulative=self.cumulative)
df3 = self.get_subtotal('Ziekenhuisopname', rename=True, cumulative=self.cumulative)
dataframe = df1.merge(df2, on='Datum')
dataframe = dataframe.merge(df3, on='Datum')
dataframe = dataframe.assign(country='nl')
#dataframe = pd.concat([df1,df2,df3])
return dataframe
def get_subtotal(self, typ, rename=True, cumulative=True):
"""Get national totals"""
dataframe = pd.read_csv('../CoronaWatchNL/data/rivm_NL_covid19_national.csv',
delimiter=',')
dataframe = dataframe[dataframe['Type'] == typ]
dataframe.drop(columns=['Type'], inplace=True)
if not cumulative:
print('Converting cumulative totals to daily values')
dataframe['Aantal'] = dataframe['Aantal'] - dataframe['Aantal'].shift(1)
if rename:
dataframe.rename(columns={"Aantal": typ}, inplace=True)
dataframe = dataframe.fillna(0)
dataframe['Datum'] = pd.to_datetime(dataframe['Datum'])
return dataframe
def get_pop(self):
"""Fetch the Population figures for NL"""
dataframe = pd.read_csv(
'data/Regionale_kerncijfers_Nederland_31082020_181423.csv', delimiter=';')
dataframe = dataframe.set_index("Regio")
dataframe.rename(columns={"aantal": "population"}, inplace=True)
self.pop = dataframe[dataframe.columns[dataframe.columns.isin(['population'])]]
def get_source_data(self):
"""Get NL source data for infections"""
dataframe = pd.read_csv('../CoronaWatchNL/data-geo/data-municipal/RIVM_NL_municipal.csv',
delimiter=',')
dataframe = dataframe[dataframe['Type'] == 'Totaal']
dataframe['Datum'] = pd.to_datetime(dataframe['Datum'])
dataframe.drop(columns=['Type', 'Provincienaam', 'Provinciecode'], inplace=True)
dataframe.dropna(inplace=True)
dataframe = dataframe.set_index('Gemeentenaam').dropna()
nlmerged = dataframe.join(self.pop)
nlmerged.reset_index(inplace=True)
nlmerged.rename(columns={'index': 'Gemeentenaam'}, inplace=True)
nlmerged = nlmerged.set_index('Gemeentecode')
nlmerged = nlmerged.join(self.map)
nlmerged.reset_index(inplace=True)
nlmerged = nlmerged.assign(country='nl')
self.merged = nlmerged
def get_map(self):
"""Get NL map data"""
map_df = gpd.read_file('maps/gemeente-2019.geojson')
#map_df = map_df.reset_index(inplace=True)
map_df.rename(columns={'Gemeenten_': 'Gemeentecode'}, inplace=True)
map_df = map_df.set_index("Gemeentecode")
map_df.drop(columns=['Gemnr', 'Shape_Leng', 'Shape_Area'], inplace=True)
self.map = map_df
class UKTimeseries(Timeseries):
"""UK Timeseries"""
def __init__(self, process=True):
"""Init"""
Timeseries.__init__(self, process)
def national(self, country, cumulative):
"""Use national totals"""
self.set_cumulative(cumulative)
dataframe = pd.read_csv('data/ukt.csv')
if cumulative:
dataframe.rename(columns={"cumCasesBySpecimenDate": "Aantal", 'date': 'Datum',
'areaCode': 'Gemeentecode',
'cumDeaths28DaysByPublishDate': 'Overleden'}, inplace=True)
else:
dataframe.rename(columns={"newCasesBySpecimenDate": "Aantal", 'date': 'Datum',
'areaCode': 'Gemeentecode',
'newDeaths28DaysByPublishDate': 'Overleden'}, inplace=True)
dataframe.loc[(dataframe.Gemeentecode.astype(str).str.startswith('S')), 'country'] = 'sco'
dataframe.loc[(dataframe.Gemeentecode.astype(str).str.startswith('W')), 'country'] = 'wal'
dataframe.loc[(dataframe.Gemeentecode.astype(str).str.startswith('E')), 'country'] = 'eng'
dataframe.loc[(dataframe.Gemeentecode.astype(str).str.startswith('N')), 'country'] = 'ni'
dataframe = dataframe[dataframe['country'] == country]
return dataframe
def get_pop(self):
"""Fetch the population figures for the UK"""
dataframe = pd.read_csv('data/ukpop4.csv')
dataframe = dataframe.set_index("ladcode20")
dataframe = dataframe.groupby(['ladcode20']).agg(sum)
dataframe.rename(columns={"population_2019": "population"}, inplace=True)
self.pop = dataframe[dataframe.columns[dataframe.columns.isin(['population'])]]
def get_source_data(self):
"""Get UK source data for infections"""
dataframe = pd.read_csv('data/uk.csv', delimiter=',')
dataframe['date'] = pd.to_datetime(dataframe['date'])
columns = {'date': 'Datum', 'areaName': 'Gemeentenaam',
'areaCode': 'Gemeentecode', 'newCasesBySpecimenDate': 'Aantal',
'cumCasesBySpecimenDate': 'AantalCumulatief'
}
dataframe.rename(columns=columns, inplace=True)
dataframe = dataframe.set_index('Gemeentecode').dropna()
ukmerged = dataframe.join(self.pop)
ukmerged = ukmerged.join(self.map)
ukmerged.reset_index(inplace=True)
ukmerged.rename(columns={'index': 'Gemeentecode'}, inplace=True)
# Mark the countries for later filtering
ukmerged.loc[(ukmerged.Gemeentecode.astype(str).str.startswith('S')), 'country'] = 'sco'
ukmerged.loc[(ukmerged.Gemeentecode.astype(str).str.startswith('W')), 'country'] = 'wal'
ukmerged.loc[(ukmerged.Gemeentecode.astype(str).str.startswith('E')), 'country'] = 'eng'
ukmerged.loc[(ukmerged.Gemeentecode.astype(str).str.startswith('N')), 'country'] = 'ni'
self.merged = ukmerged
def get_map(self):
"""Get UK Map Data"""
map_df = gpd.read_file('maps/uk_counties_2020.geojson')
# Scotland
# map_df = map_df[~map_df['lad19cd'].astype(str).str.startswith('S')]
# Northern Ireland
# map_df = map_df[~map_df['lad19cd'].astype(str).str.startswith('N')]
map_df.rename(columns={'lad19cd': 'Gemeentecode'}, inplace=True)
map_df.drop(columns=['lad19nm', 'lad19nmw', 'st_areashape',
'st_lengthshape', 'bng_e', 'bng_n', 'long', 'lat'], inplace=True)
map_df = map_df.set_index("Gemeentecode")
self.map = map_df
class DETimeseries(Timeseries):
"""DE Timeseries"""
def __init__(self, process=True):
"""Init"""
Timeseries.__init__(self, process)
def get_pop(self):
"""Fetch the population figures for the DE"""
dataframe = pd.read_csv('data/depop.csv')
dataframe = dataframe.set_index("Gemeentenaam")
self.pop = dataframe
def get_source_data(self):
"""Get DE source data for infections"""
dataframe = pd.read_excel(
'data/germany.xlsx', sheet_name='BL_7-Tage-Fallzahlen', skiprows=[0, 1])
# Rename columns
dataframe.rename(columns={'Unnamed: 0': 'Gemeentenaam'}, inplace=True)
dataframe = dataframe.set_index('Gemeentenaam')
dataframe = dataframe.T
transform = []
for index, row in dataframe.iterrows():
for region in row.keys():
transform.append({'Datum': row.name, 'Aantal': row[region], 'Gemeentenaam': region})
dataframe = pd.DataFrame(transform)
dataframe['Datum'] =
|
pd.to_datetime(dataframe['Datum'])
|
pandas.to_datetime
|
import numpy as np
import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
NaT,
Series,
TimedeltaIndex,
date_range,
offsets,
)
import pandas._testing as tm
from pandas.tseries.offsets import BDay
class TestShift:
@pytest.mark.parametrize(
"ser",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, ser, shift_size):
# GH22397
assert ser.shift(shift_size) is not ser
@pytest.mark.parametrize("move_by_freq", [pd.Timedelta("1D"), pd.Timedelta("1min")])
def test_datetime_shift_always_copy(self, move_by_freq):
# GH#22397
ser = Series(range(5), index=date_range("2017", periods=5))
assert ser.shift(freq=move_by_freq) is not ser
def test_shift(self, datetime_series):
shifted = datetime_series.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, datetime_series.index)
tm.assert_index_equal(unshifted.index, datetime_series.index)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_series.values[:-1]
)
offset = BDay()
shifted = datetime_series.shift(1, freq=offset)
unshifted = shifted.shift(-1, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
unshifted = datetime_series.shift(0, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
shifted = datetime_series.shift(1, freq="B")
unshifted = shifted.shift(-1, freq="B")
tm.assert_series_equal(unshifted, datetime_series)
# corner case
unshifted = datetime_series.shift(0)
tm.assert_series_equal(unshifted, datetime_series)
# Shifting with PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, BDay())
tm.assert_series_equal(shifted2, shifted3)
tm.assert_series_equal(ps, shifted2.shift(-1, "B"))
msg = "Given freq D does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_series_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=
|
BDay()
|
pandas.tseries.offsets.BDay
|
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import os
siemens_data = os.path.join(os.path.dirname(__file__), 'SIEGY.csv')
df = pd.read_csv(siemens_data)
df = df.set_index(
|
pd.DatetimeIndex(df['Date'].values)
|
pandas.DatetimeIndex
|
#!/home/balint/virtualenvs/py34/bin/python
import numpy as np
import pandas as pd
from datetime import date, datetime, time
#import matplotlib.pyplot as plt
#import seaborn
import os
import sys
import argparse
"""
Reads all prices, assumed to be named like 2017-01-03_prices.csv
Reads all trades, assumed to be named like 2017-01-03_trades.csv
Prices should be formatted like:
times,price
2017-01-04 09:00:00,3052.03
2017-01-04 09:00:01,3051.94
2017-01-04 09:00:02,3052.12
Trades should be formatted like:
times,trades
2017-01-04 09:00:00,-2.0
2017-01-04 09:00:01,-2.0
2017-01-04 09:00:02,-1.0
Every second is assumed to have a trade, even if it's zero
"""
def read_all(which='_trades.csv'):
list_ = []
files = [f for f in os.listdir('.') if which in f]
files.sort()
for cf in files:
df = pd.read_csv(cf, index_col='times', parse_dates=True)
list_.append(df)
return pd.concat(list_).sort_index()
def main():
parser = argparse.ArgumentParser(description='Evaluate trades')
parser.add_argument(
"-t", "--teamname", help="Name of the team for outputting results", metavar="TEAMNAME", default='Test Team')
args = parser.parse_args()
transaction_cost = 0.0
prices = read_all('_prices.csv')
print('All prices read')
trades = read_all('_trades.csv')
print('All trades read')
prices['trades'] = trades
prices['position'] = prices.trades.cumsum()
prices['invested'] = prices.position * prices.price
prices['trade_cost'] = prices.trades * prices.price
prices['transaction_cost'] = np.abs(prices.trades) * transaction_cost
prices['cum_transaction_cost'] = prices.transaction_cost.cumsum()
prices['PnL'] = (prices.trade_cost - prices.transaction_cost)
prices['cum_pnl'] = prices.PnL.cumsum() - (prices.price * prices.position)
daily_sums = prices.groupby(
|
pd.TimeGrouper('1D')
|
pandas.TimeGrouper
|
# pip install bs4 lxml
import time
import re
import json
import os
from bs4 import BeautifulSoup
import pandas as pd
import functions as func
from settings import Settings as st
class Songs:
def __init__(self, keyword,limit):
# initialize the playlist state
self.only_lyric = []
self.plist = None
self.keyword = keyword
self.limit = limit
def get_plist_songs(self, url):
if self.plist is None:
self.plist = pd.DataFrame(columns=['id','name','url'])
# build the playlist index
content = func.get_page(url).text
# create a new BeautifulSoup object
soup = BeautifulSoup(content, 'lxml')
soup_plist = soup.find(name='ul',
attrs={'class': 'f-hide'})
# set the csv file name according to the toggle
if not st.toggle:
st.csv_fname = st.playlist_title = soup.find(name='h2', class_='f-ff2').string
# filter the data
songs = {'id': [], 'name': [], 'url': []}
for song in soup_plist.find_all(name='li'):
# id
id = re.search('=([0-9]+)', song.a['href'])
# avoid recording duplicate song names ("in" on a pandas Series checks the index, so compare against .values)
id_foo = id.group(1)
if id_foo not in self.plist['id'].values:
songs['id'].append(id_foo)
# name
song_name = song.a.string
songs['name'].append(song_name)
# url
song_url = 'https://music.163.com' + song.a['href']
songs['url'].append(song_url)
songs['lyric'] = ''
df = pd.DataFrame(songs,columns=['id', 'name', 'url'])
self.plist = self.plist.append(df,ignore_index=True)
def get_lyric(self):
"""获得歌词"""
file_path = 'res/' + self.keyword + '-build-list.csv'
if not os.path.exists(file_path):
print('file not found')
exit(-1)
plist = pd.read_csv(file_path)
plist = plist.drop('Unnamed: 0',axis=1)
plist_temp = pd.DataFrame(columns=plist.columns)
total = len(plist['id'])
n=0
for index,row in plist.iterrows():
url = 'http://music.163.com/api/song/lyric?os=pc&id=' \
+ str(row['id']) \
+ '&lv=-1&kv=-1&tv='
# fetch the lyric content
content = func.get_page(url).json()
if 'lrc' in content and 'nolyric' not in content and content['lrc'] is not None:
lyric = content['lrc']['lyric']
# clean the lyrics
try:
lyric = re.sub('\[.*?\]', '', lyric)
row['lyric'] = lyric
print('completed ' + str(round(index / total * 100, 2)) + '% ', end='')
print('added lyric id: ' + str(row['id']))
plist_temp = plist_temp.append(row,ignore_index=True)
except:
continue
n+=1
if n == 300:
break
plist = plist_temp.copy()
plist.to_csv('res/'+st.search_keyword + '-with-lyrics.csv', encoding='UTF-8')
class Playlists(Songs):
def __init__(self,keyword,limit):
super().__init__(keyword=keyword,limit=limit)
self.playlists = []
def get_playlists(self):
if not os.path.exists('res/'+self.keyword+'.json'):
url = 'http://music.163.com/api/search/get/web?csrf_token=hlpretag=&hlposttag=&s={' \
+ self.keyword + '}&type=1000&offset=0&order=hot&total=true&limit=' + str(self.limit)
json_content = func.get_page(url).json()
with open('res/' + self.keyword + '.json', 'w', encoding='UTF-8') as f1:
text = json.dumps(json_content, ensure_ascii=False)
f1.write(text)
with open('res/' + self.keyword + '.json', encoding='UTF-8') as f2:
p_json = json.load(f2)
result = p_json['result']
self.playlists = result['playlists']
return self.playlists
def recur_playlists(self):
"""递归补充歌单列表、歌曲信息"""
if not os.path.exists('res/' + self.keyword + '-build-list.csv'):
time.sleep(2)
self.plist =
|
pd.DataFrame(columns=['id','name','url'])
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Utility functions for the BioMag Dementia Challenge.
(c) 2021 <NAME>, Technische Universitaet Ilmenau
'''
from difflib import get_close_matches
from distutils.version import LooseVersion
import operator
import os
import os.path as op
import sys
from pathlib import Path
import numpy as np
from _ctypes import PyObj_FromPtr
import json
import re
import pip
import pandas as pd
import warnings
from scipy import io
import pathlib
import functools
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn("Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return new_func
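# Hedged usage sketch:
#   @deprecated
#   def old_helper():
#       return 42
#   old_helper()  # emits a DeprecationWarning pointing at the call site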
def split_subject_dict(subjects):
num_subjects = len(subjects['control'])+len(subjects['dementia'])+len(subjects['mci'])
small_subjects = []
counter = 0
for k in subjects.keys():
for subid in subjects[k]:
cd = {'control':[],'dementia':[],'mci':[]}
cd[k] = [subid]
small_subjects+=[cd]
counter+=1
return small_subjects
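# Hedged example: split_subject_dict({'control': [1], 'dementia': [2], 'mci': []})
#   -> [{'control': [1], 'dementia': [], 'mci': []},
#       {'control': [], 'dementia': [2], 'mci': []}]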
def load_bad_samples(condition,subject,path=r"E:\2021_Biomag_Dementia_MNE\inT_naive_resample\bad_samples"):
bads = np.load(os.path.join(path,'bad_samples{}{}.npy'.format(condition,subject)))
bads = np.unpackbits(bads,axis=None).reshape(160,300000).astype(np.bool)
return bads
def ema(data,alpha):
"""
In-place exponential moving average along axis 1 (samples).
"""
num_samples = data.shape[1]
#ma = np.mean(data,axis=1,keepdims=True)
for t in np.arange(1,num_samples):
data[:,t] = alpha*data[:,t]+(1-alpha)*data[:,t-1]
return data
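# Hedged usage sketch (synthetic data): ema() overwrites its argument, so pass a copy.
#   x = np.random.randn(160, 1000)
#   smoothed = ema(x.copy(), alpha=0.1)   # y[:, t] = alpha*x[:, t] + (1-alpha)*y[:, t-1]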
def emstd(data,ema,alpha):
"""
In-place exponential moving standard deviation around the supplied EMA.
"""
num_samples = data.shape[1]
#ma = np.mean(data,axis=1,keepdims=True)
data[:,0]=np.var(data,axis=1)
for t in np.arange(1,num_samples):
data[:,t] = alpha*(data[:,t]-ema[:,t])**2+(1-alpha)*data[:,t-1]
return np.sqrt(data)
def ema_substraction(data,alpha):
"""
data: channels,samples
small alpha means large memory for the mean
"""
if data.shape[0]>data.shape[1]:
warnings.warn("Axis 0 has less entries then axis 1. Check that data has shape ('channels,samples') ")
moving_avg = data.copy()
num_samples = data.shape[1]
#ma = np.mean(data,axis=1,keepdims=True)
ema(moving_avg,alpha=alpha)
return data-moving_avg
def em_avg_std(data,alpha):
"""
data: channels,samples
small alpha means large memory for the mean
"""
# todo: change input to )samples,channels)
data = np.transpose(data,[1,0])
num_samples = data.shape[1]
#ma = np.mean(data,axis=1,keepdims=True)
moving_avg = ema(data.copy(),alpha=alpha)
em_std = emstd(data.copy(),moving_avg,alpha)
return np.transpose((data-moving_avg)/em_std,[1,0])
def moving_average(data,N,padding=0,alpha=1):
"""
data.shape: (channels,time_steps)
"""
num_samples = data.shape[1]
cleaned = data.copy()
EMA = np.mean(data[:,0:N])
for k in np.arange(num_samples-N):
EMA = alpha*np.mean(data[:,k:k+N])+(1-alpha)*EMA
cleaned[:,k:k+N]=data[:,k:k+N]-EMA
return cleaned
def _moving_average_ch(data,N,padding=0,alpha=1):
"""
data.shape: (time_steps)
"""
num_samples = data.shape[0]
cleaned = data.copy()
EMA = np.mean(data[0:N])
for k in np.arange(num_samples-N):
EMA = alpha*np.mean(data[k:k+N])+(1-alpha)*EMA
cleaned[k:k+N]=data[k:k+N]-EMA
return cleaned
def get_class_labels(tasks):
mapping = {'dementia':'dem','control':'control',
'mci':'mci'}
labels=[]
for t in tasks:
labels+=[mapping[t]]
return labels
def parse_filename(filename):
conditions = ['control','dementia','mci']
for c in conditions:
if c in filename:
_,remainder = filename.split(c)
condition = c
subject_id = int(''.join(filter(lambda x: x.isdigit(), remainder)))
break
return condition,subject_id
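# Hedged example (hypothetical file name): parse_filename('dementia23_resampled.npy') -> ('dementia', 23)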
def serialize_np(items):
return items.tolist()
def suject_dicts_are_unique(subject_dicts):
control_subjects = np.concatenate([fold_dict['control'] for fold_dict in subject_dicts]).astype(int)
dementia_subjects = np.concatenate([fold_dict['dementia'] for fold_dict in subject_dicts]).astype(int)
mci_subjects = np.concatenate([fold_dict['mci'] for fold_dict in subject_dicts]).astype(int)
any_double = 0
any_double += np.sum(np.bincount(control_subjects)>1)
any_double += np.sum(np.bincount(dementia_subjects)>1)
any_double += np.sum(np.bincount(mci_subjects)>1)
if any_double!=0:
print('control:\n',control_subjects)
print('dementia:\n',dementia_subjects)
print('mci:\n',mci_subjects)
return False
else:
return True
def check_dicts_contain_subjects(subject_dicts,subjects=None):
"""
If subjects is None that all subjects of the biomag dataset must be contained in subject_dicts
"""
return
def split_subjects(subjects,method,**kwargs):
return method(subjects,**kwargs)
class Subject_Splitter():
def __init__(self,subjects,method,**kwargs):
gen_split_A,gen_split_B,subjectsA,subjectsB, labelsA, labelsB
self.generator = zip(gen_split_A,gen_split_B)
return
def __getitem__(self,k):
return
def subjects_by_site(subjects,utility_path):
assert len(np.setdiff1d(['mci','dementia','control'],list(subjects.keys())))==0,(subjects.keys())
subjects_by_site = {'A':{'mci':[],'dementia':[],'control':[]},
'B':{'mci':[],'dementia':[],'control':[]}}
for condition in list(subjects.keys()):
by_site = sort_by_site(subjects,condition,path= utility_path)
subjects_by_site['A'][condition].extend(by_site['A'])
subjects_by_site['B'][condition].extend(by_site['B'])
return subjects_by_site
def split_wrt_site(subjects, test_ratio, utility_path=r'./dataframes/maxwell_and_temporal'):
from sklearn.model_selection import train_test_split
if 'A' not in subjects.keys() or 'B' not in subjects.keys():
# utility_path is required by subjects_by_site; default taken from sort_by_site below
subjects = subjects_by_site(subjects, utility_path)
labelsA,labelsB, subjectsA, subjectsB = _split_wrt_site_base(subjects)
subjectsA_train,subjectsA_test,labelsA_train,labelsA_test = train_test_split(
subjectsA,labelsA,test_size=test_ratio)
subjectsB_train,subjectsB_test,labelsB_train,labelsB_test = train_test_split(
subjectsB,labelsB,test_size=test_ratio)
print(np.where(labelsB_train=='control'))
traincontrol = np.concatenate([subjectsA_train[labelsA_train=='control'] , subjectsB_train[labelsB_train=='control']])
traindementia = np.concatenate([subjectsA_train[labelsA_train=='dementia'] , subjectsB_train[labelsB_train=='dementia']])
trainmci = np.concatenate([subjectsA_train[labelsA_train=='mci'] , subjectsB_train[labelsB_train=='mci']])
testcontrol = np.concatenate([subjectsA_test[labelsA_test=='control'] , subjectsB_test[labelsB_test=='control']])
testdementia = np.concatenate([subjectsA_test[labelsA_test=='dementia'] , subjectsB_test[labelsB_test=='dementia']])
testmci = np.concatenate([subjectsA_test[labelsA_test=='mci'] , subjectsB_test[labelsB_test=='mci']])
subjects_train = {'control':traincontrol,'mci':trainmci,'dementia':traindementia}
subjects_test = {'control':testcontrol,'mci':testmci,'dementia':testdementia}
return subjects_train, subjects_test
def _split_wrt_site_base(subjects):
subjectsA = subjects['A']
subjectsB = subjects['B']
subject_controlA = subjectsA['control']
subject_dementA = subjectsA['dementia']
subject_mciA = subjectsA['mci']
subjectsA = np.concatenate([subject_controlA,subject_dementA,subject_mciA])
labelsA = len(subject_controlA)*['control']+\
len(subject_dementA)*['dementia']+\
len(subject_mciA)*['mci']
subject_controlB = subjectsB['control']
subject_dementB = subjectsB['dementia']
subject_mciB = subjectsB['mci']
subjectsB = np.concatenate([subject_controlB,subject_dementB,subject_mciB])
labelsB = len(subject_controlB)*['control']+\
len(subject_dementB)*['dementia']+\
len(subject_mciB)*['mci']
return np.array(labelsA),np.array(labelsB), subjectsA, subjectsB
def _subjects_dict_wrt_site(subjects):
subjectsA = subjects['A']
subjectsB = subjects['B']
return subjectsA, subjectsB
def sort_by_site(subjects,condition,path= r'./dataframes/maxwell_and_temporal'):
"""
    Sorts the subjects having the given condition by recording site.
"""
by_site = {'A':[],'B':[]}
for k in subjects[condition]:
site = get_site_from_condition_number(condition,k,path)
by_site[site]+=[k]
by_site['A']=np.array(by_site['A'])
by_site['B']=np.array(by_site['B'])
return by_site
def cv_split_wrt_site(subjects,n_splits,utility_path):
"""
    subjects either has keys ['A','B'], whose values are dicts with keys
    'control', 'dementia', 'mci', or it is first converted via subjects_by_site.
    Returns all n_splits (subjects_train, subjects_test) pairs.
"""
from sklearn.model_selection import StratifiedKFold
if not 'A' in subjects.keys() or not 'B' in subjects.keys():
subjects = subjects_by_site(subjects,utility_path)
labelsA,labelsB, subjectsA, subjectsB = _split_wrt_site_base(subjects)
kfoldA = StratifiedKFold(n_splits=n_splits)
kfoldB = StratifiedKFold(n_splits=n_splits)
gen_split_A = kfoldA.split(np.arange(len(labelsA)),labelsA)
gen_split_B = kfoldB.split(np.arange(len(labelsB)),labelsB)
splits = []
for k in range(n_splits):
trainidxA,testidxA = next(gen_split_A)
trainidxB,testidxB = next(gen_split_B)
labelsAtrain = labelsA[trainidxA]
labelsBtrain = labelsB[trainidxB]
labelsAtest = labelsA[testidxA]
labelsBtest = labelsB[testidxB]
traincontrol = np.concatenate([subjectsA[trainidxA][labelsAtrain=='control'],
subjectsB[trainidxB][labelsBtrain=='control']])
traindementia = np.concatenate([subjectsA[trainidxA][labelsAtrain=='dementia'],
subjectsB[trainidxB][labelsBtrain=='dementia']])
trainmci = np.concatenate([subjectsA[trainidxA][labelsAtrain=='mci'],
subjectsB[trainidxB][labelsBtrain=='mci']])
testcontrol = np.concatenate([subjectsA[testidxA][labelsAtest=='control'],
subjectsB[testidxB][labelsBtest=='control']])
testdementia = np.concatenate([subjectsA[testidxA][labelsAtest=='dementia'],
subjectsB[testidxB][labelsBtest=='dementia']])
testmci = np.concatenate([subjectsA[testidxA][labelsAtest=='mci'],
subjectsB[testidxB][labelsBtest=='mci']])
subjects_train = {'control':traincontrol,'mci':trainmci,'dementia':traindementia}
subjects_test = {'control':testcontrol,'mci':testmci,'dementia':testdementia}
splits+=[(subjects_train,subjects_test)]
return splits
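# Hypothetical usage sketch: 'subjects' and 'utility_path' are placeholders for
# the project's subject dict and site-metadata path. Shows how the per-site
# stratified folds returned above would typically be consumed.
def _example_cv_split_wrt_site(subjects, utility_path, n_splits=5):
    for fold, (subjects_train, subjects_test) in enumerate(
            cv_split_wrt_site(subjects, n_splits, utility_path)):
        print('fold', fold,
              {k: len(v) for k, v in subjects_train.items()},
              {k: len(v) for k, v in subjects_test.items()})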
def split_ratio(subjects,test_ratio):
"""
    test_ratio: fraction of subjects from each class that is held out from training and put aside for testing.
"""
subjects_dementia = subjects['dementia']
subjects_control = subjects['control']
subjects_mci = subjects['mci']
def choice(idx):
if len(idx)==0:
return np.array([])
num_test = int(len(idx)*test_ratio)
if num_test==0:
            warnings.warn('Number of test cases for a class is 0. Its test data will be empty.')
return []
else:
return np.random.choice(idx,size=num_test,replace=False)
testdementia = choice(subjects_dementia)
testcontrol = choice(subjects_control)
testmci = choice(subjects_mci)
traindementia = np.array(list(set(list(subjects_dementia))-set(list(testdementia))))
traincontrol = np.array(list(set(list(subjects_control))-set(list(testcontrol))))
trainmci = np.array(list(set(list(subjects_mci))-set(list(testmci))))
return {'control':traincontrol,'mci':trainmci,'dementia':traindementia},\
{'control':testcontrol,'mci':testmci,'dementia':testdementia}
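# Illustrative sketch: the subject numbering below is made up (it only mirrors
# the class sizes used elsewhere in this module) and shows a 20% hold-out per
# class. Assumes numpy is imported as np.
def _example_split_ratio():
    subjects = {'control': np.arange(1, 101),
                'dementia': np.arange(1, 30),
                'mci': np.arange(1, 16)}
    subjects_train, subjects_test = split_ratio(subjects, test_ratio=0.2)
    return subjects_train, subjects_test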
def condition_to_digit(condition):
if condition=='mci':
return 1
elif condition=='control':
return 0
elif condition=='dementia':
return 2
else:
raise ValueError("condition must be mci, control or dementia")
def digit_to_condition(digit):
if digit==1:
return 'mci'
elif digit==0:
return 'control'
elif digit==2:
return 'dementia'
else:
raise ValueError("condition must be mci, control or dementia")
def unique_bads_in_fif(raw):
raw.info['bads'] = list(np.unique(np.array(raw.info['bads'])))
def get_subjects_wrt_site(subjects,cA,cB,condition,utility_path='.'):
num_subjects = {'control':100,'mci':15,'dementia':29}[condition]
for k in np.arange(1,num_subjects+1):
site = get_site_from_condition_number(condition,k,utility_path)
if site=='A' and cA>0:
cA-=1
subjects[condition]+=[k]
elif site=='B' and cB>0:
cB-=1
subjects[condition]+=[k]
elif cA==0 and cB==0:
break
def get_raw_mne_info_condition_number(condition,number,path=r'E:\2021_Biomag_Dementia_MNE\inT\interpolated100Hz\raw'):
import mne
print('site: ',get_site_from_condition_number(condition,number))
raw = mne.io.read_raw_fif(os.path.join(path,'100Hz{}{:03d}raw.fif'.format(condition,number)))
return raw.info
def correct_rotation_info(infoB):
from scipy.spatial.transform import Rotation as R
locs = np.array([infoB['chs'][ch]['loc'][:3] for ch in range(160)])
rz = R.from_rotvec(np.radians(6) * np.array([0,0,1]))
    locs = locs @ rz.as_matrix()
for ch in range(160):
infoB['chs'][ch]['loc'][:3]=locs[ch]
return infoB
def correct_rotation(rawB):
rawB.info = correct_rotation_info(rawB.info)
return rawB
def get_site_from_condition_number(condition,subject_number,direc= r'dataframes\maxwell_and_temporal'):
warnings.warn("Method is deprecated use 'get_site'.")
return get_site('foo',**{'condition':condition,'number':subject_number,'filepath':direc})
assert condition in ['mci','dementia','control']
maxwelldf = os.listdir(direc)
site = [f for f in maxwelldf if condition in f and '{:03d}'.format(subject_number) in f]
assert len(site)==1,(site)
site = site[0]
site = site.split(condition)[1].split('site')[1][0]
assert site in ['A','B']
return site
def get_site_from_json(condition,number,filepath='.'):
path = pathlib.Path(filepath)
if condition=='test':
with open(path / 'sites_test.json','r') as f:
site_dict = json.load(f)
elif condition=='control':
with open(path / 'sites_control.json','r') as f:
site_dict = json.load(f)
elif condition=='dementia':
with open(path / 'sites_dementia.json','r') as f:
site_dict = json.load(f)
elif condition=='mci':
with open(path / 'sites_mci.json','r') as f:
site_dict = json.load(f)
elif condition=='train':
with open(path / 'sites_train.json','r') as f:
site_dict = json.load(f)
else:
raise ValueError("Wrong condition provided.")
return site_dict['{}{:03d}'.format(condition,number)]
def get_site(*args,**kwargs):
if 'fs' in kwargs.keys():
return get_site_from_fs(kwargs['fs'])
elif 'condition' in kwargs.keys():
condition=kwargs['condition']
number = kwargs['number']
filepath = kwargs['filepath']
return get_site_from_json(condition,number,filepath)
else:
raise ValueError(kwargs.keys())
def get_stable_channels(matches,order):
"""
returns the channels that have order+1 matches
"""
stable_chs = []
for ch in range(160):
unique_matches = np.unique(matches[:,1,ch])
if len(unique_matches)==order+1:
stable_chs+=[(ch,unique_matches)]
# matches site A and site B have same shape for order 0
stable_chs = np.array(stable_chs)
if order==0:
stable_chs[:,1] = np.concatenate(stable_chs[:,1]).astype(np.uint8)
return stable_chs.astype(np.uint8)
def get_matches(site,mode,Path_to_cache):
cache = Path_to_cache
if mode=='full':
return np.arange(160)
elif mode=='order0':
matches = np.load(cache / "utility_data" / "pareto_opt_matches.npy")
matches = get_stable_channels(matches,0)
return matches[:,int(site=='B')]
elif mode=='by_distance':
import json
with open(cache / "utility_data" / 'A_B_bestpositional_hungarian.json') as f:
match = json.load(f)
match['matching0'].pop('info')
channel_matches = np.array([list(match['matching0'].keys()),list(match['matching0'].values())]).astype(int)
return channel_matches[int(site=='B')]
else:
raise ValueError('Valid values for mode are: {}, {}, {}'.format('full','order0','by_distance'))
# used for BioMag2021 channel matching
def write_matching(node_edit_path_dict,filename):
if not 'info' in node_edit_path_dict.keys():
raise KeyError('node_edit_path_dict must have key "info" ')
with open(filename,'w') as f:
json.dump({'matching0':node_edit_path_dict},f)
def load_matching(filename,remove_info=True):
with open(filename,'r') as f:
loaded_d = json.load(f)
node_edit_path_dict = loaded_d['matching0']
if remove_info:
print('info: ',node_edit_path_dict['info'])
del node_edit_path_dict['info']
return np.array(list(node_edit_path_dict.items())).astype('int')
def load_key_chain(sub_info,key_chain:list):
"""
sub_info: meta information in spm12 format e.g. from BioMag2021 Competition
"""
assert type(key_chain)==list,"key_chain must be of type list"
try:
if sub_info.shape==(1,1):
lvl_info = sub_info.flat[0][key_chain[0]]
else:
lvl_info = sub_info[key_chain[0]]
except ValueError as e:
print(e)
print('Possible next keys are: ',sub_info.dtype.names)
return
if len(key_chain)!=1:
return load_key_chain(lvl_info,key_chain[1:])
#print('key loaded')
return lvl_info.copy()
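# Hypothetical usage sketch: 'info' is an spm12-style meta structure as returned
# by load_spm_info() further below; the key chains mirror the ones used in
# get2D_coords() and make_raw().
def _example_load_key_chain(info):
    fs = int(load_key_chain(info['D'], ['Fsample']))
    chanpos = load_key_chain(info['D'], ['sensors', 'meg', 'chanpos'])
    return fs, chanpos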
def get2D_coords(info):
x_pos = load_key_chain(info['D'],['channels','X_plot2D'])
x_pos = np.concatenate(np.squeeze(x_pos)[:160])
y_pos = load_key_chain(info['D'],['channels','Y_plot2D'])
y_pos = np.concatenate(np.squeeze(y_pos)[:160])
coords2D = np.transpose(np.array([x_pos,y_pos])[:,:,0])
return coords2D
def split_channel_groups(data,meta):
"""
    Depending on the recording site, a different number of channels is present.
    At both sites the first 160 channels contain the MEG data.
params:
-------
data: array w/ shape (160+type2channels+type3channels,time_samples)
meta:
returns:
-------
meg,type2records,type3records
"""
ch_labels = load_key_chain(meta['D'],['channels'])['label'][0]
type2trailing = ch_labels[160][0][:3]
meg = data['data'][:160]
N_type2 = np.max([j+160 for j,label in enumerate(ch_labels[160:]) if type2trailing in label[0]])+1
type2channels = slice(160,N_type2)
type3channels = slice(N_type2,None)
type2records = data['data'][type2channels]
type3records = data['data'][type3channels]
return meg,type2records,type3records
def load_spm_info(meta_path,subject,condition=None):
if condition is None:
condition='test'
    infospm = io.loadmat(os.path.join(meta_path, 'hokuto_{}{}.mat'.format(condition,subject)))
return infospm
def load_spm(conditionGroup_meta,conditionGroup_data,subject,just_meg=True,asfloat32=False):
"""
return meg, infospm
"""
if 'control' in conditionGroup_data:
condition='control'
elif 'dementia' in conditionGroup_data:
condition='dementia'
elif 'mci' in conditionGroup_data:
condition='mci'
elif 'test' in conditionGroup_data:
condition='test'
else:
raise NameError("name 'condition' is not defined")
infospm = load_spm_info(conditionGroup_meta,subject,condition)#( +'\hokuto_{}{}.mat'.format(condition,subject))
    dataspm = io.loadmat(os.path.join(conditionGroup_data, 'hokuto_{}{}.mat'.format(condition,subject)))
fs=int(load_key_chain(infospm['D'],['Fsample']))
if fs==1000:
type2name = 'eeg'
type3name = 'others'
site='A'
else:
type2name ='trig'
type3name = 'magnetometer reference'
site='B'
meg,type2records,type3records = split_channel_groups(dataspm,infospm)
if asfloat32:
meg = meg.astype(np.float32)
type2records = type2records.astype(np.float32)
type3records = type3records.astype(np.float32)
if just_meg:
return meg, infospm
else:
return meg,type2records,type3records, infospm
def make_raw(meg,infospm,down_sample_B=False):
"""
return raw,chanpos,chanori,markers,site
"""
import mne
fs=int(load_key_chain(infospm['D'],['Fsample']))
site = get_site_from_fs(fs)
if down_sample_B and site=='B':
meg = meg[:,::2]
fs = 1000
markers = load_key_chain(infospm['D'],['fiducials','fid','pnt'])/1000
chanpos = load_key_chain(infospm['D'],['sensors','meg','chanpos'])/1000
chanori = load_key_chain(infospm['D'],['sensors','meg','chanori'])
markers[:,[0, 1]] = markers[:,[1, 0]]
chanori[:,[0, 1]] = chanori[:,[1, 0]]
chanpos[:,[0, 1]] = chanpos[:,[1, 0]]
ch_pos = {'MEG {:03}'.format(ch):chanpos[ch] for ch in range(160)}
digmontage = mne.channels.make_dig_montage(
nasion=markers[0],
lpa=markers[2],
rpa=markers[1],
hpi=[markers[2],markers[1],markers[0],markers[4],markers[3]],
coord_frame='head')
from mne.io import constants
info = mne.create_info(ch_names=['MEG {:03}'.format(ch) for ch in range(160)], sfreq=fs, ch_types='grad')
info.set_montage(digmontage)
raw = mne.io.RawArray(meg, info)
#raw.info['line_freq']=50
#raw.info['device_info']='site '+site
ch_types = constants.FIFF['FIFFV_COIL_KIT_GRAD']
for k in range(160):
raw.info['chs'][k]['coil_type']=ch_types
raw.info['chs'][k]['unit'] = mne.io.kit.constants.FIFF.FIFF_UNIT_T
raw.info['chs'][k]['loc'] = np.concatenate([chanpos[k],np.zeros(9)])
return raw,chanpos,chanori,markers,site
def make_montage(infospm=None,condition=None,subject=None,meta_path=None):
import mne
if infospm is None:
assert meta_path is not None and condition is not None and subject is not None
infospm = load_spm_info(meta_path, subject, condition)
markers = load_key_chain(infospm['D'],['fiducials','fid','pnt'])/1000
chanpos = load_key_chain(infospm['D'],['sensors','meg','chanpos'])/1000
markers[:,[0, 1]] = markers[:,[1, 0]]
chanpos[:,[0, 1]] = chanpos[:,[1, 0]]
ch_pos = {'MEG {:03}'.format(ch):chanpos[ch] for ch in range(160)}
digmontage = mne.channels.make_dig_montage(
nasion=markers[0],
lpa=markers[2],
rpa=markers[1],
hpi=[markers[2],markers[1],markers[0],markers[4],markers[3]],
coord_frame='head')
return digmontage
def get_fiducials(info):
"""
returns:
------
DataFrame of fiducial points
"""
fid_lab_iter = np.concatenate(load_key_chain(info['D'],['fiducials','fid','label']).flatten())
pnts_iter = load_key_chain(info['D'],['fiducials','fid','pnt'])
return
|
pd.DataFrame({'x':pnts_iter[:,0],'y':pnts_iter[:,1],'z':pnts_iter[:,2]},index=fid_lab_iter)
|
pandas.DataFrame
|
import os
import sys
import pandas as pd
import numpy as np
import random
from utils import *
np.random.seed(9527)
random.seed(9527)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--max_movie_num', type=int, default=1000)
parser.add_argument('--max_user_num', type=int, default=1000)
parser.add_argument('--max_user_like_tag', type=int, default=20)
parser.add_argument('--min_user_like_tag', type=int, default=10)
parser.add_argument('--max_tag_per_movie', type=int, default=8)
parser.add_argument('--min_tag_per_movie', type=int, default=8)
parser.add_argument('--rater', type=str, default='qualitybase')
parser.add_argument('--recsys', type=str, default='Pop')
parser.add_argument('--rcttag_user_num', type=int, default=100)
parser.add_argument('--rcttag_movie_num', type=int, default=10)
parser.add_argument('--missing_rate_rating', type=float, default=0.02)
parser.add_argument('--missing_type_rating', type=str, default='default')
parser.add_argument('--missing_rate_obstag', type=float, default=0.007)
parser.add_argument('--missing_type_obstag', type=str, default='default')
parser.add_argument('--quality_sigma', type=float, default=0.75)
parser.add_argument('--test_identifiable_num', type=int, default=5000)
parser.add_argument('--test_identifiable_num_positive',
type=int,
default=1500)
parser.add_argument('--test_inidentifiable_num', type=int, default=4000)
parser.add_argument('--test_inidentifiable_positive',
type=int,
default=1200)
parser.add_argument('--obstag_non_missing_rate', type=float, default=0.6)
parser.add_argument('--need_trainset', type=int, default=0)
parser.add_argument('--need_testset', type=int, default=0)
parser.add_argument('--rerank_id', type=int, default=1)
args = parser.parse_args()
paras = vars(args)
data_dir = './'
if not os.path.exists(data_dir + 'generate_data/'):
os.makedirs(data_dir + 'generate_data/')
for i in ['train', 'test']:
if not os.path.exists(data_dir + 'final_data/before_rerank_id/' + i):
os.makedirs(data_dir + 'final_data/before_rerank_id/' + i)
if not os.path.exists(data_dir + 'final_data/rerank_id/' + i):
os.makedirs(data_dir + 'final_data/rerank_id/' + i)
big_movie_tag_ct = pd.read_csv(data_dir + 'original_data/movie_tag_ct.csv')
base_movie_rating = pd.read_csv(data_dir +
'original_data/movie_rating.csv',
index_col='movieid')
print('======generating base data======')
# generate user_id data
if os.path.exists(data_dir + 'generate_data/user_id.csv'):
user_id = np.array(
pd.read_csv(data_dir + 'generate_data/user_id.csv',
index_col='userid').index)
max_user_num = len(user_id)
else:
max_user_num = paras['max_user_num']
user_id = np.array(range(max_user_num))
pd.DataFrame(data=user_id,
columns=['userid']).set_index('userid').to_csv(
data_dir + 'generate_data/user_id.csv', header=True)
# generate movie_id data
mv_tag_count: pd.DataFrame = big_movie_tag_ct[[
'movieid', 'tagCount'
]].groupby('movieid')['tagCount'].sum().sort_values(
        ascending=False)  # how many times each movie has been tagged
if os.path.exists(data_dir + 'generate_data/movie_id.csv'):
movie_data = pd.read_csv(data_dir + 'generate_data/movie_id.csv',
index_col='movieid')
movie_id = np.array(movie_data.index)
max_movie_num = len(movie_id)
else:
max_movie_num = min(len(mv_tag_count), paras['max_movie_num'])
movie_id = np.array(mv_tag_count.head(max_movie_num).index)
movie_data = pd.DataFrame(data=movie_id,
columns=['movieid']).set_index('movieid')
movie_data.to_csv(data_dir + 'generate_data/movie_id.csv', header=True)
obstag_count: pd.DataFrame = big_movie_tag_ct[
big_movie_tag_ct['movieid'].isin(movie_id)].groupby(
'tagid')['tagCount'].sum().sort_values(
                ascending=False).to_frame()  # total count of each tag within the selected movies
rct_distribution = obstag_count['tagCount'] / \
        obstag_count['tagCount'].sum()  # tag-popularity distribution
obstag_count.to_csv(data_dir + 'generate_data/obstag_count.csv',
header=True)
# generate movie_real_tag_list data
if os.path.exists(data_dir + 'generate_data/movie_real_tag_list.csv'):
movie_real_tag_list = pd.read_csv(
data_dir + 'generate_data/movie_real_tag_list.csv',
index_col='movieid')
movie_real_tag_list['taglist'] = movie_real_tag_list['taglist'].apply(
eval)
# print(movie_real_tag_list.head())
else:
movie_real_tag_list =
|
pd.DataFrame()
|
pandas.DataFrame
|
import argparse
from haystack import Finder
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.retriever.dense import EmbeddingRetriever
import pandas as pd
from transformers import AutoTokenizer, AutoModel
class FaqBot(object):
"""
FAQ-Style question answering chatbot. Contains methods for storing embedding documents for retrieval and
interacting with the bot. These methods contain elements inspired by:
https://github.com/deepset-ai/haystack/blob/master/tutorials/Tutorial4_FAQ_style_QA.py
This also contains several methods for pre-processing the experimental data for FAQ style format.
"""
def __init__(self, args):
self.args = args
# FAQ type finder
self.document_store = ElasticsearchDocumentStore(
host="localhost",
password="",
index="faq_" + self.args.run_type,
embedding_field="question_emb",
embedding_dim=768,
excluded_meta_data=["question_emb"]
)
self.retriever = EmbeddingRetriever(document_store=self.document_store,
embedding_model=self.args.embedding_model,
use_gpu=False)
def interact(self, question):
"""
Method to be used for isolated debugging of FAQ bot through make command or directly
:param question: Question to ask
"""
finder = Finder(reader=None, retriever=self.retriever)
prediction = finder.get_answers_via_similar_questions(question=question, top_k_retriever=1)
print("Answer:\n", prediction['answers'][0]['answer'])
print("Probability: ", prediction['answers'][0]['probability'])
def store_documents(self):
"""
Used to store "documents" or FAQ data (curated question and answer pairs) for later comparison
to user queries. Here, we are indexing in elasticsearch.
NOTE: Expects docker instance of elasticsearch to be running (see make elastic command)
"""
df = pd.read_csv(self.args.path_to_train_data + self.args.faq_dataset)
# Minor data cleaning
df.fillna(value="", inplace=True)
df["question"] = df["question"].apply(lambda x: x.strip())
# Get embeddings for our questions from the FAQs
questions = list(df["question"].values)
df["question_emb"] = self.retriever.embed_queries(texts=questions)
# convert from numpy to list for ES indexing
df["question_emb"] = df["question_emb"].apply(list)
df = df.rename(columns={"answer": "text"})
# Convert Dataframe to list of dicts and index them in our DocumentStore
docs_to_index = df.to_dict(orient="records")
self.document_store.write_documents(docs_to_index)
def download_embedding_model(self):
tokenizer = AutoTokenizer.from_pretrained(self.args.embedding_model)
model = AutoModel.from_pretrained(self.args.embedding_model)
tokenizer.save_pretrained(self.args.embedding_model)
model.save_pretrained(self.args.embedding_model)
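# Hypothetical usage sketch: 'args' stands in for the argparse namespace this
# module expects (run_type, embedding_model, path_to_train_data, faq_dataset);
# a local elasticsearch instance must already be running.
def _example_faq_bot(args):
    bot = FaqBot(args)
    bot.store_documents()
    bot.interact("What are the symptoms of COVID-19?")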
def combine_and_refine_faq_datasets(bouncename):
"""
    Combines and refines the JHU COVID FAQ dataset with the covidbert FAQ data.
:param bouncename: Name of exported file
"""
df_jhu_unique = pd.read_csv(BASE_PATH + "chatbots/haystack/data/faq/jhu_covid_qa.csv")
df_jhu_unique.dropna(inplace=True)
df_jhu_rating_90 = df_jhu_unique[df_jhu_unique['rating'] >= 90]
df_jhu_rating_90.drop_duplicates(subset='question1', keep="first", inplace=True)
df_jhu_rating_90.rename({'question1': 'question'}, axis=1, inplace=True)
df_jhu_rating_90 = df_jhu_rating_90[['question', 'answer']]
df_jhu_unique.drop_duplicates(subset='question2', keep="first", inplace=True)
df_jhu_unique.rename({'question2': 'question'}, axis=1, inplace=True)
df_jhu_unique = df_jhu_unique[['question', 'answer']]
df_faqcovidbert = pd.read_csv(BASE_PATH + "chatbots/haystack/data/faq/faq-faq_covidbert.csv")
df_faqcovidbert = df_faqcovidbert.replace('\n', '. ', regex=True)
df = df_faqcovidbert.append(df_jhu_unique, ignore_index=True)
df = df.append(df_jhu_rating_90, ignore_index=True)
df.drop_duplicates(subset='question', keep="first", inplace=True)
# Shuffling rows
df = df.sample(frac=1).reset_index(drop=True)
df.to_csv(bouncename, encoding='utf-8', index=False)
def convert_faq_answers_to_files(filename, bouncename, include_question_nums=False):
"""
Converts FAQ style dataset to format amenable to haystack annotation tool, with option to
include question indices corresponding to row values in excel. If questions are not included,
docs can be used for retrieval storage in QA-style chatbot.
:param filename: FAQ style file to bounce
:param bouncename: Base name of exported annotation files
:param include_question_nums: Whether to include question numbers in exported files
"""
df = pd.read_csv(filename)
count = 1
unique_answers = df['answer'].unique()
for answer in unique_answers:
questions = df.index[df['answer'] == answer].tolist()
# incrementing by 2 to accommodate excel
questions = [x + 2 for x in questions]
questions_str = ', '.join(str(e) for e in questions)
if include_question_nums:
text = ":QUESTIONS: " + questions_str + " :QUESTIONS:\n" + answer + "\n\n"
else:
text = answer
new_name = bouncename.replace('.txt', str(count) + '.txt')
with open(new_name, "w") as text_file:
text_file.write(text)
count += 1
print("Annotation files created.")
def convert_faq_to_dialog_format(filename, bouncename):
"""
    Converts FAQ style dataset to single question + answer format, amenable
to DialoGPT and other conversational models
:param filename: Name of FAQ style data file to convert
:param bouncename: Name of new dataset to bounce
:return:
"""
df = pd.read_csv(filename)
dialog_df = pd.DataFrame()
lines = []
for line in range(0, df.shape[0]):
lines.append(df.iloc[line]['question'])
lines.append(df.iloc[line]['answer'])
dialog_df['lines'] = lines
dialog_df.to_csv(bouncename, encoding='utf-8', index=False)
def create_intent_file(filename, intent_filename):
"""
    Creates an intent file for Rasa NLU. Each question is prepended with '- ' so the
    text can be inserted directly into rasa/data/nlu.md intent entries; note that
    similarity between FAQ and QA questions will likely cause overlap issues.
:param filename: Name of FAQ style data file to convert to intent text file
:param intent_filename: Name of intent text file to bounce
"""
df = pd.read_csv(filename)
questions = []
for line in range(0, df.shape[0]):
questions.append('- ' + df.iloc[line]['question'].replace('\n', '') + '\n')
with open(intent_filename, "w") as output:
output.write(str(''.join(questions)))
print("Generated {} for intent training.".format(intent_filename))
def create_faq_control_dataset(filename):
"""
Creating FAQ control dataset of 2nd half of full dataset (QA is first half). Data was already shuffled
in combine_and_refine_faq_datasets so a simple split is fine
    :param filename: Name of the full FAQ dataset file to split
"""
df =
|
pd.read_csv(filename)
|
pandas.read_csv
|
"""
dariah.topics.modeling
~~~~~~~~~~~~~~~~~~~~~~
This module implements low-level LDA modeling functions.
"""
from pathlib import Path
import tempfile
import os
import logging
import multiprocessing
import shutil
from typing import Optional, Union
import cophi
import lda
import numpy as np
import pandas as pd
from dariah.mallet import MALLET
from dariah.core import utils
logging.getLogger("lda").setLevel(logging.WARNING)
class LDA:
"""Latent Dirichlet allocation.
Args:
num_topics: The number of topics.
num_iterations: The number of iterations.
        alpha: The alpha hyperparameter (prior on document-topic distributions).
        eta: The eta hyperparameter (prior on topic-word distributions).
        random_state: Seed for reproducible results.
        mallet: Optional path to the MALLET executable.
"""
def __init__(
self,
num_topics: int,
num_iterations: int = 1000,
alpha: float = 0.1,
eta: float = 0.01,
random_state: int = None,
mallet: Optional[Union[str, Path]] = None,
) -> None:
self.num_topics = num_topics
self.num_iterations = num_iterations
self.alpha = alpha
self.eta = eta
self.random_state = random_state
self.mallet = mallet
if mallet:
if not Path(self.mallet).exists():
# Check if MALLET is in environment variable:
if not os.environ.get(self.mallet):
raise OSError(
"MALLET executable was not found. "
"'{}' does not exist".format(self.mallet)
)
self.mallet = os.environ.get(self.mallet)
if not Path(self.mallet).is_file():
raise OSError(
"'{}' is not a file. "
"Point to the 'mallet/bin/mallet' file.".format(self.mallet)
)
else:
self._model = lda.LDA(
n_topics=self.num_topics,
n_iter=self.num_iterations,
alpha=self.alpha,
eta=self.eta,
random_state=self.random_state,
)
def fit(self, dtm: pd.DataFrame) -> None:
"""Fit the model.
Args:
dtm: The document-term matrix.
"""
self._vocabulary = list(dtm.columns)
self._documents = list(dtm.index)
dtm = dtm.fillna(0).astype(int)
if self.mallet:
self._mallet_lda(dtm)
else:
self._riddell_lda(dtm.values)
@property
def topics(self):
"""Topics with 200 top words.
"""
if self.mallet:
return self._mallet_topics
else:
return self._riddell_topics
@property
def topic_word(self):
"""Topic-word distributions.
"""
if self.mallet:
return self._mallet_topic_word
else:
return self._riddell_topic_word
@property
def topic_document(self):
"""Topic-document distributions.
"""
if self.mallet:
return self._mallet_topic_document
else:
return self._riddell_topic_document
@property
def topic_similarities(self):
"""Topic similarity matrix.
"""
data = self.topic_document.T.copy()
return self._similarities(data)
@property
def document_similarities(self):
"""Document similarity matrix.
"""
data = self.topic_document.copy()
return self._similarities(data)
@staticmethod
def _similarities(data: pd.DataFrame) -> pd.DataFrame:
"""Calculate cosine simliarity matrix.
Args:
data: A matrix to calculate similarities for.
Returns:
A similarity matrix.
"""
descriptors = data.columns
d = data.T @ data
norm = (data * data).sum(0) ** 0.5
similarities = d / norm / norm.T
return pd.DataFrame(similarities, index=descriptors, columns=descriptors)
def _riddell_lda(self, dtm: pd.DataFrame) -> None:
"""Fit the Riddell LDA model.
Args:
dtm: The document-term matrix.
"""
self._model.fit(dtm)
@property
def _riddell_topics(self):
"""Topics of the Riddell LDA model.
"""
maximum = len(self._vocabulary)
num_words = 200 if maximum > 200 else maximum
index = [f"topic{n}" for n in range(self.num_topics)]
columns = [f"word{n}" for n in range(num_words)]
topics = [
np.array(self._vocabulary)[np.argsort(dist)][: -num_words - 1 : -1]
for dist in self._model.topic_word_
]
return pd.DataFrame(topics, index=index, columns=columns)
@property
def _riddell_topic_word(self):
"""Topic-word distributions for Riddell LDA model.
"""
index = [f"topic{n}" for n in range(self.num_topics)]
return pd.DataFrame(
self._model.topic_word_, index=index, columns=self._vocabulary
)
@property
def _riddell_topic_document(self):
"""Topic-document distributions for Riddell LDA model.
"""
index = [f"topic{n}" for n in range(self.num_topics)]
return pd.DataFrame(
self._model.doc_topic_, index=self._documents, columns=index
).T
def _mallet_lda(self, dtm: pd.DataFrame) -> None:
"""Fit the MALLET LDA model.
Args:
            dtm: The document-term matrix.
"""
# Get number of CPUs for threaded processing:
cpu = multiprocessing.cpu_count() - 1
# Get temporary directory to dump corpus files:
self._tempdir = Path(tempfile.gettempdir(), "dariah-topics")
if self._tempdir.exists():
shutil.rmtree(str(self._tempdir))
self._tempdir.mkdir()
# Export document-term matrix to plaintext files:
corpus_sequence = Path(self._tempdir, "corpus.sequence")
cophi.text.utils.export(dtm, corpus_sequence, "plaintext")
# Construct MALLET object:
mallet = MALLET(self.mallet)
# Create a MALLET corpus file:
corpus_mallet = Path(self._tempdir, "corpus.mallet")
mallet.import_file(
input=str(corpus_sequence), output=str(corpus_mallet), keep_sequence=True
)
# Construct paths to MALLET output files:
self._topic_document_file = Path(self._tempdir, "topic-document.txt")
self._topic_word_file = Path(self._tempdir, "topic-word.txt")
self._topics_file = Path(self._tempdir, "topics.txt")
self._word_topic_counts_file = Path(self._tempdir, "word-topic-counts-file.txt")
# Train topics:
mallet.train_topics(
input=str(corpus_mallet),
num_topics=self.num_topics,
num_iterations=self.num_iterations,
output_doc_topics=self._topic_document_file,
output_topic_keys=self._topics_file,
topic_word_weights_file=self._topic_word_file,
word_topic_counts_file=self._word_topic_counts_file,
alpha=self.alpha,
beta=self.eta,
num_top_words=200,
num_threads=cpu,
random_seed=self.random_state,
)
@property
def _mallet_topics(self):
"""Topics of MALLET LDA model.
"""
maximum = len(self._vocabulary)
num_words = 200 if maximum > 200 else maximum
index = [f"topic{n}" for n in range(self.num_topics)]
columns = [f"word{n}" for n in range(num_words)]
topics = utils.read_mallet_topics(self._topics_file, num_words)
return pd.DataFrame(topics, index=index, columns=columns)
@property
def _mallet_topic_word(self):
"""Topic-word distributions of MALLET LDA model.
"""
index = [f"topic{n}" for n in range(self.num_topics)]
data =
|
pd.read_csv(self._topic_word_file, sep="\t", header=None)
|
pandas.read_csv
|
"""Merge the Stanford movie review corpus with the Cornell labels.
"""
__author__ = '<NAME>'
from typing import Set
from dataclasses import dataclass, field
import logging
import re
import shutil
from pathlib import Path
import pandas as pd
logger = logging.getLogger(__name__)
@dataclass
class DatasetFactory(object):
"""Creates a dataframe out of the merged dataset (text reviews with labels).
"""
FILE_NAME = re.compile(r'^(.+)\.csv$')
rt_pol_path: Path
stanford_path: Path
dataset_path: Path
tok_len: int
throw_out: Set[str] = field(repr=False)
repls: dict = field(repr=False)
split_col: str
@staticmethod
    def split_sents(line: str) -> list:
        """In the Cornell corpus, many sentences are joined. This splits them
        into separate sentences.
"""
toks = re.split(r'([a-zA-Z0-9]{3,}) \. ([^.]{2,})', line)
if len(toks) == 4:
lines = [' '.join(toks[0:2]), ' '.join(toks[2:])]
else:
lines = [line]
return lines
def sent2bow(self, sent: str) -> str:
"""Create an skey from a text that represents a sentence.
"""
sent = sent.lower().strip()
sent = sent.encode('ascii', errors='ignore').decode()
for repl in self.repls:
sent = sent.replace(*repl)
sent = re.sub(r"\[([a-zA-Z0-9' ]+)\]", '\\1', sent)
sent = re.split(r"[\t ,';:\\/.]+", sent)
sent = filter(lambda x: len(x) > 0, sent)
sent = filter(lambda x: x not in self.throw_out, sent)
# I tried to take every other word for utterances that start the same,
# but this brought down matches
sent = tuple(sent)[0:self.tok_len]
sent = '><'.join(sent)
return sent
def polarity_df(self, path, polarity) -> pd.DataFrame:
"""Create a polarity data frame.
:param path: the path to the Cornell annotated corpus
:param polarity: the string used for the polarity column (`p` or `n`)
"""
lines = []
rid_pol = []
with open(path, encoding='latin-1') as f:
for line in f.readlines():
line = line.strip()
lines.append(line)
for line in lines:
            # I tried to split on multiple sentences that were joined, but that
# makes things worse
for sent in self.split_sents(line):
key = self.sent2bow(sent)
if len(key) == 0:
continue
rid_pol.append((polarity, key))
return
|
pd.DataFrame(rid_pol, columns=['polarity', 'skey'])
|
pandas.DataFrame
|
# Copyright (C) 2022 National Center for Atmospheric Research and National Oceanic and Atmospheric Administration
# SPDX-License-Identifier: Apache-2.0
#
#Code to create plots for surface observations
import os
import monetio as mio
import monet as monet
import seaborn as sns
from monet.util.tools import calc_8hr_rolling_max, calc_24hr_ave
import xarray as xr
import pandas as pd
import numpy as np
import cartopy.crs as ccrs
import matplotlib as mpl
import matplotlib.pyplot as plt
from numpy import corrcoef
sns.set_context('paper')
from monet.plots.taylordiagram import TaylorDiagram as td
from matplotlib.colors import ListedColormap
from monet.util.tools import get_epa_region_bounds as get_epa_bounds
import math
from ..plots import savefig
# from util import write_ncf
def make_24hr_regulatory(df, col=None):
"""Calculates 24-hour averages
Parameters
----------
df : dataframe
Model/obs pair of hourly data
col : str
Column label of observation variable to apply the calculation
Returns
-------
dataframe
dataframe with applied calculation
"""
return calc_24hr_ave(df, col)
def make_8hr_regulatory(df, col=None):
"""Calculates 8-hour rolling average daily
Parameters
----------
df : dataframe
Model/obs pair of hourly data
col : str
Column label of observation variable to apply the calculation
Returns
-------
dataframe
dataframe with applied calculation
"""
return calc_8hr_rolling_max(df, col, window=8)
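# Hypothetical usage sketch: 'df_pair' is an hourly model/obs paired dataframe
# and the column names are placeholders; the actual calculations are done by
# the monet.util.tools helpers imported at the top of this module.
def _example_regulatory_metrics(df_pair):
    df_o3_8hr = make_8hr_regulatory(df_pair, col='OZONE')
    df_pm_24hr = make_24hr_regulatory(df_pair, col='PM2.5')
    return df_o3_8hr, df_pm_24hr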
def calc_default_colors(p_index):
"""List of default colors, lines, and markers to use if user does not
specify them in the input yaml file.
Parameters
----------
p_index : integer
Number of pairs in analysis class
Returns
-------
list
List of dictionaries containing default colors, lines, and
markers to use for plotting for the number of pairs in analysis class
"""
x = [dict(color='b', linestyle='--',marker='x'),
dict(color='g', linestyle='-.',marker='o'),
dict(color='r', linestyle=':',marker='v'),
dict(color='c', linestyle='--',marker='^'),
dict(color='m', linestyle='-.',marker='s')]
#Repeat these 5 instances over and over if more than 5 lines.
return x[p_index % 5]
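# Illustrative sketch: shows how the five default styles above are reused
# cyclically when more than five model/obs pairs are plotted.
def _example_calc_default_colors(num_pairs=7):
    return [calc_default_colors(p) for p in range(num_pairs)]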
def new_color_map():
"""Creates new color map for difference plots
Returns
-------
colormap
Orange and blue color map
"""
top = mpl.cm.get_cmap('Blues_r', 128)
bottom = mpl.cm.get_cmap('Oranges', 128)
newcolors = np.vstack((top(np.linspace(0, 1, 128)),
bottom(np.linspace(0, 1, 128))))
return ListedColormap(newcolors, name='OrangeBlue')
def map_projection(f):
"""Defines map projection. This needs updating to make it more generic.
Parameters
----------
f : class
model class
Returns
-------
cartopy projection
projection to be used by cartopy in plotting
"""
import cartopy.crs as ccrs
if f.model.lower() == 'cmaq':
proj = ccrs.LambertConformal(
central_longitude=f.obj.XCENT, central_latitude=f.obj.YCENT)
elif f.model.lower() == 'wrfchem' or f.model.lower() == 'rapchem':
if f.obj.MAP_PROJ == 1:
proj = ccrs.LambertConformal(
central_longitude=f.obj.CEN_LON, central_latitude=f.obj.CEN_LAT)
        elif f.obj.MAP_PROJ == 6:
#Plate Carree is the equirectangular or equidistant cylindrical
proj = ccrs.PlateCarree(
central_longitude=f.obj.CEN_LON)
else:
raise NotImplementedError('WRFChem projection not supported. Please add to surfplots.py')
#Need to add the projections you want to use for the other models here.
elif f.model.lower() == 'rrfs':
proj = ccrs.LambertConformal(
central_longitude=f.obj.cen_lon, central_latitude=f.obj.cen_lat)
elif f.model.lower() in ['cesm_fv','cesm_se']:
proj = ccrs.PlateCarree()
elif f.model.lower() == 'random':
proj = ccrs.PlateCarree()
else: #Let's change this tomorrow to just plot as lambert conformal if nothing provided.
raise NotImplementedError('Projection not defined for new model. Please add to surfplots.py')
return proj
def make_spatial_bias(df, column_o=None, label_o=None, column_m=None,
label_m=None, ylabel = None, vdiff=None,
outname = 'plot',
domain_type=None, domain_name=None, fig_dict=None,
text_dict=None,debug=False):
"""Creates surface spatial bias plot.
Parameters
----------
df : dataframe
model/obs pair data to plot
column_o : str
Column label of observation variable to plot
label_o : str
Name of observation variable to use in plot title
column_m : str
Column label of model variable to plot
label_m : str
Name of model variable to use in plot title
ylabel : str
Title of colorbar axis
vdiff : real number
Min and max value to use on colorbar axis
outname : str
file location and name of plot (do not include .png)
domain_type : str
Domain type specified in input yaml file
domain_name : str
Domain name specified in input yaml file
fig_dict : dictionary
Dictionary containing information about figure
text_dict : dictionary
Dictionary containing information about text
debug : boolean
        Whether to plot interactively (True) or not (False). Set to False when
        submitting jobs to a supercomputer to turn off interactive mode.
Returns
-------
plot
surface bias plot
"""
if debug == False:
plt.ioff()
def_map = dict(states=True,figsize=[10, 5])
if fig_dict is not None:
map_kwargs = {**def_map, **fig_dict}
else:
map_kwargs = def_map
#If not specified use the PlateCarree projection
if 'crs' not in map_kwargs:
map_kwargs['crs'] = ccrs.PlateCarree()
#set default text size
def_text = dict(fontsize=20)
if text_dict is not None:
text_kwargs = {**def_text, **text_dict}
else:
text_kwargs = def_text
# set ylabel to column if not specified.
if ylabel is None:
ylabel = column_o
#Take the mean for each siteid
df_mean=df.groupby(['siteid'],as_index=False).mean()
#Specify val_max = vdiff. the sp_scatter_bias plot in MONET only uses the val_max value
#and then uses -1*val_max value for the minimum.
ax = monet.plots.sp_scatter_bias(
df_mean, col1=column_o, col2=column_m, map_kwargs=map_kwargs,val_max=vdiff,
cmap=new_color_map(), edgecolor='k',linewidth=.8)
if domain_type == 'all':
latmin= 25.0
lonmin=-130.0
latmax= 50.0
lonmax=-60.0
plt.title(domain_name + ': ' + label_m + ' - ' + label_o,fontweight='bold',**text_kwargs)
elif domain_type == 'epa_region' and domain_name is not None:
latmin,lonmin,latmax,lonmax,acro = get_epa_bounds(index=None,acronym=domain_name)
plt.title('EPA Region ' + domain_name + ': ' + label_m + ' - ' + label_o,fontweight='bold',**text_kwargs)
else:
latmin= math.floor(min(df.latitude))
lonmin= math.floor(min(df.longitude))
latmax= math.ceil(max(df.latitude))
lonmax= math.ceil(max(df.longitude))
plt.title(domain_name + ': ' + label_m + ' - ' + label_o,fontweight='bold',**text_kwargs)
if 'extent' not in map_kwargs:
map_kwargs['extent'] = [lonmin,lonmax,latmin,latmax]
ax.axes.set_extent(map_kwargs['extent'],crs=ccrs.PlateCarree())
#Update colorbar
f = plt.gcf()
model_ax = f.get_axes()[0]
cax = f.get_axes()[1]
#get the position of the plot axis and use this to rescale nicely the color bar to the height of the plot.
position_m = model_ax.get_position()
position_c = cax.get_position()
cax.set_position([position_c.x0, position_m.y0, position_c.x1 - position_c.x0, (position_m.y1-position_m.y0)*1.1])
cax.set_ylabel(ylabel,fontweight='bold',**text_kwargs)
cax.tick_params(labelsize=text_kwargs['fontsize']*0.8,length=10.0,width=2.0,grid_linewidth=2.0)
#plt.tight_layout(pad=0)
savefig(outname + '.png', loc=4, logo_height=120)
def make_timeseries(df, column=None, label=None, ax=None, avg_window=None, ylabel=None,
vmin = None, vmax = None,
domain_type=None, domain_name=None,
plot_dict=None, fig_dict=None, text_dict=None,debug=False):
"""Creates timeseries plot.
Parameters
----------
df : dataframe
model/obs pair data to plot
column : str
Column label of variable to plot
label : str
Name of variable to use in plot legend
ax : ax
matplotlib ax from previous occurrence so can overlay obs and model
results on the same plot
avg_window : rule
Pandas resampling rule (e.g., 'H', 'D')
ylabel : str
Title of y-axis
vmin : real number
Min value to use on y-axis
vmax : real number
Max value to use on y-axis
domain_type : str
Domain type specified in input yaml file
domain_name : str
Domain name specified in input yaml file
plot_dict : dictionary
Dictionary containing information about plotting for each pair
(e.g., color, linestyle, markerstyle)
fig_dict : dictionary
Dictionary containing information about figure
text_dict : dictionary
Dictionary containing information about text
debug : boolean
        Whether to plot interactively (True) or not (False). Set to False when
        submitting jobs to a supercomputer to turn off interactive mode.
Returns
-------
ax
matplotlib ax such that driver.py can iterate to overlay multiple models on the
same plot
"""
if debug == False:
plt.ioff()
#First define items for all plots
#set default text size
def_text = dict(fontsize=14)
if text_dict is not None:
text_kwargs = {**def_text, **text_dict}
else:
text_kwargs = def_text
# set ylabel to column if not specified.
if ylabel is None:
ylabel = column
if label is not None:
plot_dict['label'] = label
if vmin is not None and vmax is not None:
plot_dict['ylim'] = [vmin,vmax]
#scale the fontsize for the x and y labels by the text_kwargs
plot_dict['fontsize'] = text_kwargs['fontsize']*0.8
#Then, if no plot has been created yet, create a plot and plot the obs.
if ax is None:
#First define the colors for the observations.
obs_dict = dict(color='k', linestyle='-',marker='*', linewidth=1.2, markersize=6.)
if plot_dict is not None:
#Whatever is not defined in the yaml file is filled in with the obs_dict here.
plot_kwargs = {**obs_dict, **plot_dict}
else:
plot_kwargs = obs_dict
# create the figure
if fig_dict is not None:
f,ax = plt.subplots(**fig_dict)
else:
f,ax = plt.subplots(figsize=(10,6))
# plot the line
if avg_window is None:
ax = df[column].plot(ax=ax, **plot_kwargs)
else:
ax = df[column].resample(avg_window).mean().plot(ax=ax, legend=True, **plot_kwargs)
# If plot has been created add to the current axes.
else:
# this means that an axis handle already exists and use it to plot the model output.
if avg_window is None:
ax = df[column].plot(ax=ax, legend=True, **plot_dict)
else:
ax = df[column].resample(avg_window).mean().plot(ax=ax, legend=True, **plot_dict)
#Set parameters for all plots
ax.set_ylabel(ylabel,fontweight='bold',**text_kwargs)
ax.set_xlabel(df.index.name,fontweight='bold',**text_kwargs)
ax.legend(frameon=False,fontsize=text_kwargs['fontsize']*0.8)
ax.tick_params(axis='both',length=10.0,direction='inout')
ax.tick_params(axis='both',which='minor',length=5.0,direction='out')
ax.legend(frameon=False,fontsize=text_kwargs['fontsize']*0.8,
bbox_to_anchor=(1.0, 0.9), loc='center left')
if domain_type is not None and domain_name is not None:
if domain_type == 'epa_region':
ax.set_title('EPA Region ' + domain_name,fontweight='bold',**text_kwargs)
else:
ax.set_title(domain_name,fontweight='bold',**text_kwargs)
return ax
def make_taylor(df, column_o=None, label_o='Obs', column_m=None, label_m='Model',
dia=None, ylabel=None, ty_scale=1.5,
domain_type=None, domain_name=None,
plot_dict=None, fig_dict=None, text_dict=None,debug=False):
"""Creates taylor plot. Note sometimes model values are off the scale
on this plot. This will be fixed soon.
Parameters
----------
df : dataframe
model/obs pair data to plot
column_o : str
Column label of observational variable to plot
label_o : str
Name of observational variable to use in plot legend
column_m : str
Column label of model variable to plot
label_m : str
Name of model variable to use in plot legend
dia : dia
matplotlib ax from previous occurrence so can overlay obs and model
results on the same plot
ylabel : str
        Title of the standard deviation axis
ty_scale : real
Scale to apply to taylor plot to control the plotting range
domain_type : str
Domain type specified in input yaml file
domain_name : str
Domain name specified in input yaml file
plot_dict : dictionary
Dictionary containing information about plotting for each pair
(e.g., color, linestyle, markerstyle)
fig_dict : dictionary
Dictionary containing information about figure
text_dict : dictionary
Dictionary containing information about text
debug : boolean
        Whether to plot interactively (True) or not (False). Set to False when
        submitting jobs to a supercomputer to turn off interactive mode.
Returns
-------
class
Taylor diagram class defined in MONET
"""
#First define items for all plots
if debug == False:
plt.ioff()
#set default text size
def_text = dict(fontsize=14.0)
if text_dict is not None:
text_kwargs = {**def_text, **text_dict}
else:
text_kwargs = def_text
# set ylabel to column if not specified.
if ylabel is None:
ylabel = column_o
#Then, if no plot has been created yet, create a plot and plot the first pair.
if dia is None:
# create the figure
if fig_dict is not None:
f = plt.figure(**fig_dict)
else:
f = plt.figure(figsize=(12,10))
sns.set_style('ticks')
# plot the line
dia = td(df[column_o].std(), scale=ty_scale, fig=f,
rect=111, label=label_o)
plt.grid(linewidth=1, alpha=.5)
cc = corrcoef(df[column_o].values, df[column_m].values)[0, 1]
dia.add_sample(df[column_m].std(), cc, zorder=9, label=label_m, **plot_dict)
# If plot has been created add to the current axes.
else:
# this means that an axis handle already exists and use it to plot another model
cc = corrcoef(df[column_o].values, df[column_m].values)[0, 1]
dia.add_sample(df[column_m].std(), cc, zorder=9, label=label_m, **plot_dict)
#Set parameters for all plots
contours = dia.add_contours(colors='0.5')
plt.clabel(contours, inline=1, fontsize=text_kwargs['fontsize']*0.8)
plt.grid(alpha=.5)
plt.legend(frameon=False,fontsize=text_kwargs['fontsize']*0.8,
bbox_to_anchor=(0.75, 0.93), loc='center left')
if domain_type is not None and domain_name is not None:
if domain_type == 'epa_region':
plt.title('EPA Region ' + domain_name,fontweight='bold',**text_kwargs)
else:
plt.title(domain_name,fontweight='bold',**text_kwargs)
ax = plt.gca()
ax.axis["left"].label.set_text('Standard Deviation: '+ylabel)
ax.axis["top"].label.set_text('Correlation')
ax.axis["left"].label.set_fontsize(text_kwargs['fontsize'])
ax.axis["top"].label.set_fontsize(text_kwargs['fontsize'])
ax.axis["left"].label.set_fontweight('bold')
ax.axis["top"].label.set_fontweight('bold')
ax.axis["top"].major_ticklabels.set_fontsize(text_kwargs['fontsize']*0.8)
ax.axis["left"].major_ticklabels.set_fontsize(text_kwargs['fontsize']*0.8)
ax.axis["right"].major_ticklabels.set_fontsize(text_kwargs['fontsize']*0.8)
return dia
def make_spatial_overlay(df, vmodel, column_o=None, label_o=None, column_m=None,
label_m=None, ylabel = None, vmin=None,
vmax = None, nlevels = None, proj = None, outname = 'plot',
domain_type=None, domain_name=None, fig_dict=None,
text_dict=None,debug=False):
"""Creates spatial overlay plot.
Parameters
----------
df : dataframe
model/obs pair data to plot
vmodel: dataarray
slice of model data to plot
column_o : str
Column label of observation variable to plot
label_o : str
Name of observation variable to use in plot title
column_m : str
Column label of model variable to plot
label_m : str
Name of model variable to use in plot title
ylabel : str
Title of colorbar axis
vmin : real number
Min value to use on colorbar axis
vmax : real number
Max value to use on colorbar axis
nlevels: integer
Number of levels used in colorbar axis
proj: cartopy projection
cartopy projection to use in plot
outname : str
file location and name of plot (do not include .png)
domain_type : str
Domain type specified in input yaml file
domain_name : str
Domain name specified in input yaml file
fig_dict : dictionary
Dictionary containing information about figure
text_dict : dictionary
Dictionary containing information about text
debug : boolean
        Whether to plot interactively (True) or not (False). Set to False when
        submitting jobs to a supercomputer to turn off interactive mode.
Returns
-------
plot
spatial overlay plot
"""
if debug == False:
plt.ioff()
def_map = dict(states=True,figsize=[15, 8])
if fig_dict is not None:
map_kwargs = {**def_map, **fig_dict}
else:
map_kwargs = def_map
#set default text size
def_text = dict(fontsize=20)
if text_dict is not None:
text_kwargs = {**def_text, **text_dict}
else:
text_kwargs = def_text
# set ylabel to column if not specified.
if ylabel is None:
ylabel = column_o
#Take the mean for each siteid
df_mean=df.groupby(['siteid'],as_index=False).mean()
#Take the mean over time for the model output
vmodel_mean = vmodel[column_m].mean(dim='time').squeeze()
#Determine the domain
if domain_type == 'all':
latmin= 25.0
lonmin=-130.0
latmax= 50.0
lonmax=-60.0
title_add = domain_name + ': '
elif domain_type == 'epa_region' and domain_name is not None:
latmin,lonmin,latmax,lonmax,acro = get_epa_bounds(index=None,acronym=domain_name)
title_add = 'EPA Region ' + domain_name + ': '
else:
latmin= math.floor(min(df.latitude))
lonmin= math.floor(min(df.longitude))
latmax= math.ceil(max(df.latitude))
lonmax= math.ceil(max(df.longitude))
title_add = domain_name + ': '
#Map the model output first.
cbar_kwargs = dict(aspect=15,shrink=.8)
#Add options that this could be included in the fig_kwargs in yaml file too.
if 'extent' not in map_kwargs:
map_kwargs['extent'] = [lonmin,lonmax,latmin,latmax]
if 'crs' not in map_kwargs:
map_kwargs['crs'] = proj
#With pcolormesh, a Warning shows because nearest interpolation may not work for non-monotonically increasing regions.
#Because I do not want to pull in the edges of the lat lon for every model I switch to contourf.
#First determine colorbar, so can use the same for both contourf and scatter
if vmin == None and vmax == None:
vmin = np.min((vmodel_mean.quantile(0.01), df_mean[column_o].quantile(0.01)))
vmax = np.max((vmodel_mean.quantile(0.99), df_mean[column_o].quantile(0.99)))
if nlevels == None:
nlevels = 21
clevel = np.linspace(vmin,vmax,nlevels)
cmap = mpl.cm.get_cmap('Spectral_r',nlevels-1)
norm = mpl.colors.BoundaryNorm(clevel, ncolors=cmap.N, clip=False)
    # For an unstructured grid, we need more advanced plotting code,
    # so call the external function Plot_2D.
if vmodel.attrs.get('mio_has_unstructured_grid',False):
from .Plot_2D import Plot_2D
fig = plt.figure( figsize=fig_dict['figsize'] )
ax = fig.add_subplot(1,1,1,projection=proj)
p2d = Plot_2D( vmodel_mean, scrip_file=vmodel.mio_scrip_file, cmap=cmap, #colorticks=clevel, colorlabels=clevel,
cmin=vmin, cmax=vmax, lon_range=[lonmin,lonmax], lat_range=[latmin,latmax],
ax=ax, state=fig_dict['states'] )
else:
#I add extend='both' here because the colorbar is setup to plot the values outside the range
ax = vmodel_mean.monet.quick_contourf(cbar_kwargs=cbar_kwargs, figsize=map_kwargs['figsize'], map_kws=map_kwargs,
robust=True, norm=norm, cmap=cmap, levels=clevel, extend='both')
plt.gcf().canvas.draw()
plt.tight_layout(pad=0)
plt.title(title_add + label_o + ' overlaid on ' + label_m,fontweight='bold',**text_kwargs)
ax.axes.scatter(df_mean.longitude.values, df_mean.latitude.values,s=30,c=df_mean[column_o],
transform=ccrs.PlateCarree(), edgecolor='b', linewidth=.50, norm=norm,
cmap=cmap)
ax.axes.set_extent(map_kwargs['extent'],crs=ccrs.PlateCarree())
#Uncomment these lines if you update above just to verify colorbars are identical.
#Also specify plot above scatter = ax.axes.scatter etc.
#cbar = ax.figure.get_axes()[1]
#plt.colorbar(scatter,ax=ax)
#Update colorbar
# Call below only for structured grid cases
if not vmodel.attrs.get('mio_has_unstructured_grid',False):
f = plt.gcf()
model_ax = f.get_axes()[0]
cax = f.get_axes()[1]
#get the position of the plot axis and use this to rescale nicely the color bar to the height of the plot.
position_m = model_ax.get_position()
position_c = cax.get_position()
cax.set_position([position_c.x0, position_m.y0, position_c.x1 - position_c.x0, (position_m.y1-position_m.y0)*1.1])
cax.set_ylabel(ylabel,fontweight='bold',**text_kwargs)
cax.tick_params(labelsize=text_kwargs['fontsize']*0.8,length=10.0,width=2.0,grid_linewidth=2.0)
#plt.tight_layout(pad=0)
savefig(outname + '.png', loc=4, logo_height=100, dpi=150)
return ax
def calculate_boxplot(df, column=None, label=None, plot_dict=None, comb_bx = None, label_bx = None):
"""Combines data into acceptable format for box-plot
Parameters
----------
df : dataframe
Model/obs pair object
column : str
Column label of variable to plot
label : str
Name of variable to use in plot legend
comb_bx: dataframe
dataframe containing information to create box-plot from previous
occurrence so can overlay multiple model results on plot
label_bx: list
list of string labels to use in box-plot from previous occurrence so
can overlay multiple model results on plot
Returns
-------
dataframe, list
dataframe containing information to create box-plot
list of string labels to use in box-plot
"""
if comb_bx is None and label_bx is None:
comb_bx = pd.DataFrame()
label_bx = []
#First define the colors for the observations.
obs_dict = dict(color='gray', linestyle='-',marker='x', linewidth=1.2, markersize=6.)
if plot_dict is not None:
#Whatever is not defined in the yaml file is filled in with the obs_dict here.
plot_kwargs = {**obs_dict, **plot_dict}
else:
plot_kwargs = obs_dict
else:
plot_kwargs = plot_dict
    #For all cases, add a column to the dataframe and append the label info to the list.
plot_kwargs['column'] = column
plot_kwargs['label'] = label
comb_bx[label] = df[column]
label_bx.append(plot_kwargs)
return comb_bx, label_bx
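# Hypothetical usage sketch: 'df_pair' is a paired dataframe and the column
# names/labels are placeholders. Shows how calculate_boxplot accumulates the
# data and style info that make_boxplot (below) consumes.
def _example_boxplot(df_pair):
    comb_bx, label_bx = calculate_boxplot(df_pair, column='obs_var', label='Obs')
    comb_bx, label_bx = calculate_boxplot(df_pair, column='model_var', label='Model',
                                          plot_dict=dict(color='b'),
                                          comb_bx=comb_bx, label_bx=label_bx)
    make_boxplot(comb_bx, label_bx, ylabel='Concentration', outname='example_boxplot')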
def make_boxplot(comb_bx, label_bx, ylabel = None, vmin = None, vmax = None, outname='plot',
domain_type=None, domain_name=None,
plot_dict=None, fig_dict=None,text_dict=None,debug=False):
"""Creates box-plot.
Parameters
----------
comb_bx: dataframe
dataframe containing information to create box-plot from
calculate_boxplot
label_bx: list
list of string labels to use in box-plot from calculate_boxplot
ylabel : str
Title of y-axis
vmin : real number
Min value to use on y-axis
vmax : real number
Max value to use on y-axis
outname : str
file location and name of plot (do not include .png)
domain_type : str
Domain type specified in input yaml file
domain_name : str
Domain name specified in input yaml file
plot_dict : dictionary
Dictionary containing information about plotting for each pair
(e.g., color, linestyle, markerstyle)
fig_dict : dictionary
Dictionary containing information about figure
text_dict : dictionary
Dictionary containing information about text
debug : boolean
        Whether to plot interactively (True) or not (False). Set to False when
        submitting jobs to a supercomputer to turn off interactive mode.
Returns
-------
plot
box plot
"""
if debug == False:
plt.ioff()
#First define items for all plots
#set default text size
def_text = dict(fontsize=14)
if text_dict is not None:
text_kwargs = {**def_text, **text_dict}
else:
text_kwargs = def_text
# set ylabel to column if not specified.
if ylabel is None:
ylabel = label_bx[0]['column']
#Fix the order and palate colors
order_box = []
pal = {}
for i in range(len(label_bx)):
order_box.append(label_bx[i]['label'])
pal[label_bx[i]['label']] = label_bx[i]['color']
#Make plot
if fig_dict is not None:
f,ax = plt.subplots(**fig_dict)
else:
f,ax = plt.subplots(figsize=(8,8))
#Define characteristics of boxplot.
boxprops = {'edgecolor': 'k', 'linewidth': 1.5}
lineprops = {'color': 'k', 'linewidth': 1.5}
boxplot_kwargs = {'boxprops': boxprops, 'medianprops': lineprops,
'whiskerprops': lineprops, 'capprops': lineprops,
'fliersize' : 2.0,
'flierprops': dict(marker='*',
markerfacecolor='blue',
markeredgecolor='none',
markersize = 6.0),
'width': 0.75, 'palette': pal,
'order': order_box,
'showmeans': True,
'meanprops': {'marker': ".", 'markerfacecolor': 'black',
'markeredgecolor': 'black',
'markersize': 20.0}}
sns.set_style("whitegrid")
sns.set_style("ticks")
sns.boxplot(ax=ax,x="variable", y="value",data=
|
pd.melt(comb_bx)
|
pandas.melt
|
import sys, os
import pandas as pd
import numpy as np
from statsmodels.tsa.base import tsa_model as tsa
from statsmodels.tsa import holtwinters as hw
import unittest
from nyoka import ExponentialSmoothingToPMML
class TestMethods(unittest.TestCase):
def test_keras_01(self):
def import_data(trend=False, seasonality=False):
"""
Returns a dataframe with time series values.
:param trend: boolean
If True, returns data with trend
:param seasonality: boolean
If True, returns data with seasonality
:return: ts_data: DataFrame
Index of the data frame is either a time-index or an integer index. First column has time series values
"""
if trend and seasonality:
# no of international visitors in Australia
data = [41.7275, 24.0418, 32.3281, 37.3287, 46.2132, 29.3463, 36.4829, 42.9777, 48.9015, 31.1802, 37.7179,
40.4202, 51.2069, 31.8872, 40.9783, 43.7725, 55.5586, 33.8509, 42.0764, 45.6423, 59.7668, 35.1919,
44.3197, 47.9137]
index =
|
pd.DatetimeIndex(start='2005', end='2010-Q4', freq='QS')
|
pandas.DatetimeIndex
|
# -*- coding: utf-8 -*-
"""System transmission plots.
This code creates transmission line and interface plots.
@author: <NAME>, <NAME>
"""
import os
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.dates as mdates
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import PlotLibrary
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, DataSavedInModule,
UnderDevelopment, InputSheetError, MissingMetaData, UnsupportedAggregation, MissingZoneData)
class MPlot(PlotDataHelper):
"""transmission MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The transmission.py module contains methods that are
related to the transmission network.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.font_defaults = mconfig.parser("font_settings")
def line_util(self, **kwargs):
"""Creates a timeseries line plot of transmission lineflow utilization for each region.
Utilization is plotted between 0 and 1 on the y-axis.
The plot will default to showing the 10 highest utilized lines. A Line category
can also be passed instead, using the property field in the Marmot_plot_select.csv
Each scenario is plotted on a separate facet plot.
This method calls _util() to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._util(**kwargs)
return outputs
def line_hist(self, **kwargs):
"""Creates a histogram of transmission lineflow utilization for each region.
Utilization is plotted between 0 and 1 on the x-axis, with # lines on the y-axis.
Each bar is equal to a 0.05 utilization rate
The plot will default to showing all lines. A Line category can also be passed
instead using the property field in the Marmot_plot_select.csv
Each scenario is plotted on a separate facet plot.
This method calls _util() and passes the hist=True argument to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._util(hist=True, **kwargs)
return outputs
def _util(self, hist: bool = False, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates utilization plots, line plot and histograms
This method is called from line_util() and line_hist()
Args:
hist (bool, optional): If True creates a histogram of utilization.
Defaults to False.
prop (str, optional): Optional PLEXOS line category to display.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
# sets up x, y dimensions of plot
ncols, nrows = self.set_facet_col_row_dimensions(facet=True,
multi_scenario=self.Scenarios)
grid_size = ncols*nrows
# Used to calculate any excess axis to delete
plot_number = len(self.Scenarios)
excess_axs = grid_size - plot_number
for zone_input in self.Zones:
self.logger.info(f"For all lines touching Zone = {zone_input}")
mplt = PlotLibrary(nrows, ncols, sharey=True,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.1, hspace=0.25)
data_table=[]
for n, scenario in enumerate(self.Scenarios):
self.logger.info(f"Scenario = {str(scenario)}")
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.warning("Column to Aggregate by is missing")
continue
try:
zone_lines = zone_lines.xs(zone_input)
zone_lines=zone_lines['line_name'].unique()
except KeyError:
self.logger.warning('No data to plot for scenario')
outputs[zone_input] = MissingZoneData()
continue
flow = self["line_Flow"].get(scenario).copy()
#Limit to only lines touching to this zone
flow = flow[flow.index.get_level_values('line_name').isin(zone_lines)]
if self.shift_leapday == True:
flow = self.adjust_for_leapday(flow)
limits = self["line_Import_Limit"].get(scenario).copy()
limits = limits.droplevel('timestamp').drop_duplicates()
limits.mask(limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
# This checks for a nan in string. If no scenario selected, do nothing.
if pd.notna(prop):
self.logger.info(f"Line category = {str(prop)}")
line_relations = self.meta.lines(scenario).rename(columns={"name":"line_name"}).set_index(["line_name"])
flow=pd.merge(flow,line_relations, left_index=True,
right_index=True)
flow=flow[flow["category"] == prop]
flow=flow.drop('category',axis=1)
flow = pd.merge(flow,limits[0].abs(),on = 'line_name',how='left')
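# After this merge the flow values sit in column '0_x' and the absolute import
# limit in '0_y'; utilization is |flow| / limit, with missing limits treated as 0.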
flow['Util']=(flow['0_x'].abs()/flow['0_y']).fillna(0)
#If greater than 1 because exceeds flow limit, report as 1
flow.loc[flow['Util'] > 1, 'Util'] = 1
annual_util=flow['Util'].groupby(["line_name"]).mean().rename(scenario)
# top annual utilized lines
top_utilization = annual_util.nlargest(10, keep='first')
color_dict = dict(zip(self.Scenarios,self.color_list))
if hist == True:
mplt.histogram(annual_util, color_dict,label=scenario, sub_pos=n)
else:
for line in top_utilization.index.get_level_values(level='line_name').unique():
duration_curve = flow.loc[line].sort_values(by='Util',
ascending=False).reset_index(drop=True)
mplt.lineplot(duration_curve, 'Util' ,label=line, sub_pos=n)
axs[n].set_ylim((0,1.1))
data_table.append(annual_util)
mplt.add_legend()
#Remove extra axes
mplt.remove_excess_axs(excess_axs,grid_size)
# add facet labels
mplt.add_facet_labels(xlabels=self.xlabels,
ylabels = self.ylabels)
if hist == True:
if pd.notna(prop):
prop_name = prop
else:
prop_name = 'All Lines'
plt.ylabel('Number of lines', color='black',
rotation='vertical', labelpad=30)
plt.xlabel(f'Line Utilization: {prop_name}', color='black',
rotation='horizontal', labelpad=30)
else:
if pd.notna(prop):
prop_name = prop
else:
prop_name = 'Top 10 Lines'
plt.ylabel(f'Line Utilization: {prop_name}', color='black',
rotation='vertical', labelpad=60)
plt.xlabel('Intervals', color='black',
rotation='horizontal', labelpad=20)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
try:
del annual_util
except:
continue
Data_Out = pd.concat(data_table)
outputs[zone_input] = {'fig': fig,'data_table':Data_Out}
return outputs
def int_flow_ind(self, figure_name: str = None, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a line plot of interchange flows and their import and export limits.
Each interchange is plotted on a separate facet plot.
The plot includes every interchange that originates or ends in the aggregation zone.
This can be adjusted by passing a comma separated string of interchanges to the property input.
The code will create either a timeseries or a duration curve depending on
whether the word 'duration_curve' is in the figure_name.
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
prop (str, optional): Comma separated string of interchanges.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"interface_Flow",self.Scenarios),
(True,"interface_Import_Limit",self.Scenarios),
(True,"interface_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
scenario = self.Scenarios[0]
outputs = {}
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
for zone_input in self.Zones:
self.logger.info(f"For all interfaces touching Zone = {zone_input}")
Data_Table_Out = pd.DataFrame()
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.info("Column to Aggregate by is missing")
continue
zone_lines = zone_lines.xs(zone_input)
zone_lines = zone_lines['line_name'].unique()
#Map lines to interfaces
all_ints = self.meta.interface_lines(scenario) #Map lines to interfaces
all_ints.index = all_ints.line
ints = all_ints.loc[all_ints.index.intersection(zone_lines)]
#flow = flow[flow.index.get_level_values('interface_name').isin(ints.interface)] #Limit to only interfaces touching to this zone
#flow = flow.droplevel('interface_category')
export_limits = self["interface_Export_Limit"].get(scenario).copy().droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits.index.get_level_values('interface_name').isin(ints.interface)]
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced interfaces.
#Drop unnecessary columns.
export_limits.reset_index(inplace = True)
export_limits.drop(columns=['interface_category', 'units'], inplace=True)
export_limits.set_index('interface_name',inplace = True)
import_limits = self["interface_Import_Limit"].get(scenario).copy().droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits.index.get_level_values('interface_name').isin(ints.interface)]
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced interfaces.
reported_ints = import_limits.index.get_level_values('interface_name').unique()
#Drop unnecessary columns.
import_limits.reset_index(inplace = True)
import_limits.drop(columns=['interface_category', 'units'], inplace=True)
import_limits.set_index('interface_name',inplace = True)
#Extract time index
ti = self["interface_Flow"][self.Scenarios[0]].index.get_level_values('timestamp').unique()
if pd.notna(prop):
interf_list = prop.split(',')
self.logger.info('Plotting only interfaces specified in Marmot_plot_select.csv')
self.logger.info(interf_list)
else:
interf_list = reported_ints.copy()
self.logger.info('Plotting full time series results.')
xdim,ydim = self.set_x_y_dimension(len(interf_list))
mplt = PlotLibrary(ydim, xdim, squeeze=False,
ravel_axs=True)
fig, axs = mplt.get_figure()
grid_size = xdim * ydim
excess_axs = grid_size - len(interf_list)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
missing_ints = 0
chunks = []
n = -1
for interf in interf_list:
n += 1
#Remove leading spaces
if interf[0] == ' ':
interf = interf[1:]
if interf in reported_ints:
chunks_interf = []
single_exp_lim = export_limits.loc[interf] / 1000 #TODO: Use auto unit converter
single_imp_lim = import_limits.loc[interf] / 1000
#Check if all hours have the same limit.
check = single_exp_lim.to_numpy()
identical = check[0] == check.all()
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
for scenario in self.Scenarios:
flow = self["interface_Flow"].get(scenario)
single_int = flow.xs(interf, level='interface_name') / 1000
single_int.index = single_int.index.droplevel(['interface_category','units'])
single_int.columns = [interf]
single_int = single_int.reset_index().set_index('timestamp')
limits = limits.reset_index().set_index('timestamp')
if self.shift_leapday == True:
single_int = self.adjust_for_leapday(single_int)
if pd.notna(start_date_range):
single_int = single_int[start_date_range : end_date_range]
limits = limits[start_date_range : end_date_range]
if duration_curve:
single_int = self.sort_duration(single_int,interf)
mplt.lineplot(single_int, interf,
label=f"{scenario}\n interface flow",
sub_pos=n)
# Only print limits if it doesn't change monthly or if you are plotting a time series.
# Otherwise the limit lines could be misleading.
if not duration_curve or identical[0]:
if scenario == self.Scenarios[-1]:
#Only plot limits for last scenario.
limits_color_dict = {'export limit': 'red', 'import limit': 'green'}
mplt.lineplot(limits, 'export limit',
label='export limit', color=limits_color_dict,
linestyle='--', sub_pos=n)
mplt.lineplot(limits, 'import limit',
label='import limit', color=limits_color_dict,
linestyle='--', sub_pos=n)
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_int), name='Scenario')
single_int_out = single_int.set_index([scenario_names], append=True)
chunks_interf.append(single_int_out)
Data_out_line = pd.concat(chunks_interf,axis = 0)
Data_out_line.columns = [interf]
chunks.append(Data_out_line)
else:
self.logger.warning(f"{interf} not found in results. Have you tagged "
"it with the 'Must Report' property in PLEXOS?")
excess_axs += 1
missing_ints += 1
continue
axs[n].set_title(interf)
if not duration_curve:
mplt.set_subplot_timeseries_format(sub_pos=n)
if missing_ints == len(interf_list):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
Data_Table_Out = Data_Table_Out.reset_index()
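# Sorted duration curves are re-indexed 0..N, so after reset_index() the unnamed
# index appears as 'level_0'; timeseries keep their 'timestamp' index.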
index_name = 'level_0' if duration_curve else 'timestamp'
Data_Table_Out = Data_Table_Out.pivot(index = index_name,columns = 'Scenario')
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
# Data_Table_Out = Data_Table_Out.reset_index()
# Data_Table_Out = Data_Table_Out.groupby(Data_Table_Out.index // 24).mean()
# Data_Table_Out.index = pd.date_range(start = '1/1/2024',end = '12/31/2024',freq = 'D')
mplt.add_legend()
plt.ylabel('Flow (GW)', color='black', rotation='vertical',
labelpad=30)
if duration_curve:
plt.xlabel('Sorted hour of the year', color='black', labelpad=30)
plt.tight_layout(rect=[0, 0.03, 1, 0.97])
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
#Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Interface_Limits.csv'))
return outputs
def int_flow_ind_seasonal(self, figure_name: str = None, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""#TODO: Finish Docstring
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
prop (str, optional): Comma separated string of interchanges.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
#TODO: Use auto unit converter in method
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"interface_Flow",self.Scenarios),
(True,"interface_Import_Limit",self.Scenarios),
(True,"interface_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
scenario = self.Scenarios[0]
outputs = {}
for zone_input in self.Zones:
self.logger.info("For all interfaces touching Zone = "+zone_input)
Data_Table_Out = pd.DataFrame()
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.info("Column to Aggregate by is missing")
continue
zone_lines = zone_lines.xs(zone_input)
zone_lines = zone_lines['line_name'].unique()
#Map lines to interfaces
all_ints = self.meta.interface_lines(scenario) #Map lines to interfaces
all_ints.index = all_ints.line
ints = all_ints.loc[all_ints.index.intersection(zone_lines)]
#flow = flow[flow.index.get_level_values('interface_name').isin(ints.interface)] #Limit to only interfaces touching to this zone
#flow = flow.droplevel('interface_category')
export_limits = self["interface_Export_Limit"].get(scenario).droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits.index.get_level_values('interface_name').isin(ints.interface)]
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced interfaces.
#Drop unnecessary columns.
export_limits.reset_index(inplace = True)
export_limits.drop(columns = 'interface_category',inplace = True)
export_limits.set_index('interface_name',inplace = True)
import_limits = self["interface_Import_Limit"].get(scenario).droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits.index.get_level_values('interface_name').isin(ints.interface)]
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced interfaces.
reported_ints = import_limits.index.get_level_values('interface_name').unique()
#Drop unnecessary columns.
import_limits.reset_index(inplace = True)
import_limits.drop(columns = 'interface_category',inplace = True)
import_limits.set_index('interface_name',inplace = True)
#Extract time index
ti = self["interface_Flow"][self.Scenarios[0]].index.get_level_values('timestamp').unique()
if pd.notna(prop) and prop != '':
interf_list = prop.split(',')
self.logger.info('Plotting only interfaces specified in Marmot_plot_select.csv')
self.logger.info(interf_list)
else:
interf_list = reported_ints.copy()
self.logger.info('Carving out season from ' + start_date_range + ' to ' + end_date_range)
#Remove missing interfaces from the list.
#Build a new list rather than removing items while iterating over interf_list.
cleaned_interf_list = []
for interf in interf_list:
#Remove leading spaces
interf = interf.lstrip()
if interf not in reported_ints:
self.logger.warning(interf + ' not found in results.')
else:
cleaned_interf_list.append(interf)
interf_list = cleaned_interf_list
if not interf_list:
outputs = MissingInputData()
return outputs
xdim = 2
ydim = len(interf_list)
mplt = PlotLibrary(ydim, xdim, squeeze=False)
fig, axs = mplt.get_figure()
grid_size = xdim * ydim
excess_axs = grid_size - len(interf_list)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
missing_ints = 0
chunks = []
limits_chunks = []
n = -1
for interf in interf_list:
n += 1
#Remove leading spaces
if interf[0] == ' ':
interf = interf[1:]
chunks_interf = []
single_exp_lim = export_limits.loc[interf] / 1000
single_imp_lim = import_limits.loc[interf] / 1000
#Check if all hours have the same limit.
check = single_exp_lim.to_numpy()
identical = check[0] == check.all()
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
for scenario in self.Scenarios:
flow = self["interface_Flow"].get(scenario)
single_int = flow.xs(interf,level = 'interface_name') / 1000
single_int.index = single_int.index.droplevel('interface_category')
single_int.columns = [interf]
if self.shift_leapday == True:
single_int = self.adjust_for_leapday(single_int)
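# Split the year into two seasons: 'summer' spans start_date_range to end_date_range,
# 'winter' is every remaining timestamp.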
summer = single_int[start_date_range:end_date_range]
winter = single_int.drop(summer.index)
summer_lim = limits[start_date_range:end_date_range]
winter_lim = limits.drop(summer.index)
if duration_curve:
summer = self.sort_duration(summer,interf)
winter = self.sort_duration(winter,interf)
summer_lim = self.sort_duration(summer_lim,'export limit')
winter_lim = self.sort_duration(winter_lim,'export limit')
axs[n,0].plot(summer[interf],linewidth = 1,label = scenario + '\n interface flow')
axs[n,1].plot(winter[interf],linewidth = 1,label = scenario + '\n interface flow')
if scenario == self.Scenarios[-1]:
for col in summer_lim:
limits_color_dict = {'export limit': 'red', 'import limit': 'green'}
axs[n,0].plot(summer_lim[col], linewidth=1, linestyle='--',
color=limits_color_dict[col], label=col)
axs[n,1].plot(winter_lim[col], linewidth=1, linestyle='--',
color=limits_color_dict[col], label=col)
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_int), name='Scenario')
single_int_out = single_int.set_index([scenario_names], append=True)
chunks_interf.append(single_int_out)
Data_out_line = pd.concat(chunks_interf,axis = 0)
Data_out_line.columns = [interf]
chunks.append(Data_out_line)
axs[n,0].set_title(interf)
axs[n,1].set_title(interf)
if not duration_curve:
locator = mdates.AutoDateLocator(minticks=4, maxticks=8)
formatter = mdates.ConciseDateFormatter(locator)
formatter.formats[2] = '%d\n %b'
formatter.zero_formats[1] = '%b\n %Y'
formatter.zero_formats[2] = '%d\n %b'
formatter.zero_formats[3] = '%H:%M\n %d-%b'
formatter.offset_formats[3] = '%b %Y'
formatter.show_offset = False
axs[n,0].xaxis.set_major_locator(locator)
axs[n,0].xaxis.set_major_formatter(formatter)
axs[n,1].xaxis.set_major_locator(locator)
axs[n,1].xaxis.set_major_formatter(formatter)
mplt.add_legend()
Data_Table_Out = pd.concat(chunks,axis = 1)
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
plt.ylabel('Flow (GW)', color='black', rotation='vertical', labelpad=30)
if duration_curve:
plt.xlabel('Sorted hour of the year', color = 'black', labelpad = 30)
fig.text(0.15,0.98,'Summer (' + start_date_range + ' to ' + end_date_range + ')',fontsize = 16)
fig.text(0.58,0.98,'Winter',fontsize = 16)
plt.tight_layout(rect=[0, 0.03, 1, 0.97])
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
#Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Interface_Limits.csv'))
return outputs
#TODO: re-organize parameters (self vs. not self)
def int_flow_ind_diff(self, figure_name: str = None, **_):
"""Plot under development
This method plots the hourly difference in interface flow between two scenarios for
individual interfaces, with a facet for each interface.
The two scenarios are defined in the "Scenario_Diff" row of Marmot_user_defined_inputs.
The interfaces are specified in the plot properties field of Marmot_plot_select.csv (column 4).
The figure and data tables are saved within the module.
Returns:
UnderDevelopment(): Exception class, plot is not functional.
"""
return UnderDevelopment() # TODO: add new get_data method
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
check_input_data = []
Flow_Collection = {}
Import_Limit_Collection = {}
Export_Limit_Collection = {}
check_input_data.extend([get_data(Flow_Collection,"interface_Flow",self.Marmot_Solutions_folder, self.Scenarios)])
check_input_data.extend([get_data(Import_Limit_Collection,"interface_Import_Limit",self.Marmot_Solutions_folder, self.Scenarios)])
check_input_data.extend([get_data(Export_Limit_Collection,"interface_Export_Limit",self.Marmot_Solutions_folder, self.Scenarios)])
if 1 in check_input_data:
outputs = MissingInputData()
return outputs
scenario = self.Scenarios[0]
outputs = {}
if not pd.isnull(self.start_date):
self.logger.info("Plotting specific date range: \
{} to {}".format(str(self.start_date),str(self.end_date)))
for zone_input in self.Zones:
self.logger.info("For all interfaces touching Zone = "+zone_input)
Data_Table_Out = pd.DataFrame()
# gets correct metadata based on area aggregation
if self.AGG_BY=='zone':
zone_lines = self.meta.zone_lines(scenario)
else:
zone_lines = self.meta.region_lines(scenario)
try:
zone_lines = zone_lines.set_index([self.AGG_BY])
except:
self.logger.info("Column to Aggregate by is missing")
continue
zone_lines = zone_lines.xs(zone_input)
zone_lines = zone_lines['line_name'].unique()
#Map lines to interfaces
all_ints = self.meta.interface_lines(scenario) #Map lines to interfaces
all_ints.index = all_ints.line
ints = all_ints.loc[all_ints.index.intersection(zone_lines)]
#flow = flow[flow.index.get_level_values('interface_name').isin(ints.interface)] #Limit to only interfaces touching to this zone
#flow = flow.droplevel('interface_category')
export_limits = Export_Limit_Collection.get(scenario).droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits.index.get_level_values('interface_name').isin(ints.interface)]
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced interfaces.
#Drop unnecessary columns.
export_limits.reset_index(inplace = True)
export_limits.drop(columns = 'interface_category',inplace = True)
export_limits.set_index('interface_name',inplace = True)
import_limits = Import_Limit_Collection.get(scenario).droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits.index.get_level_values('interface_name').isin(ints.interface)]
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced interfaces.
reported_ints = import_limits.index.get_level_values('interface_name').unique()
#Drop unnecessary columns.
import_limits.reset_index(inplace = True)
import_limits.drop(columns = 'interface_category',inplace = True)
import_limits.set_index('interface_name',inplace = True)
#Extract time index
ti = Flow_Collection[self.Scenarios[0]].index.get_level_values('timestamp').unique()
if self.prop != '':
interf_list = self.prop.split(',')
self.logger.info('Plotting only interfaces specified in Marmot_plot_select.csv')
self.logger.info(interf_list)
else:
interf_list = reported_ints.copy()
self.logger.info('Plotting full time series results.')
xdim,ydim = self.set_x_y_dimension(len(interf_list))
mplt = PlotLibrary(ydim, xdim,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
grid_size = xdim * ydim
excess_axs = grid_size - len(interf_list)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
missing_ints = 0
chunks = []
limits_chunks = []
n = -1
for interf in interf_list:
n += 1
#Remove leading spaces
if interf[0] == ' ':
interf = interf[1:]
if interf in reported_ints:
chunks_interf = []
single_exp_lim = export_limits.loc[interf] / 1000 #TODO: Use auto unit converter in method
single_imp_lim = import_limits.loc[interf] / 1000
#Check if all hours have the same limit.
check = single_exp_lim.to_numpy()
identical = check[0] == check.all()
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
for scenario in self.Scenarios:
flow = Flow_Collection.get(scenario)
single_int = flow.xs(interf,level = 'interface_name') / 1000
single_int.index = single_int.index.droplevel('interface_category')
single_int.columns = [interf]
if self.shift_leapday == True:
single_int = self.adjust_for_leapday(single_int)
single_int = single_int.reset_index().set_index('timestamp')
limits = limits.reset_index().set_index('timestamp')
if not pd.isnull(self.start_date):
single_int = single_int[self.start_date : self.end_date]
limits = limits[self.start_date : self.end_date]
if duration_curve:
single_int = self.sort_duration(single_int,interf)
mplt.lineplot(single_int,interf,label = scenario + '\n interface flow', sub_pos = n)
#Only print limits if it doesn't change monthly or if you are plotting a time series. Otherwise the limit lines could be misleading.
if not duration_curve or identical[0]:
if scenario == self.Scenarios[-1]:
#Only plot limits for last scenario.
limits_color_dict = {'export limit': 'red', 'import limit': 'green'}
mplt.lineplot(limits,'export limit',label = 'export limit',color = limits_color_dict,linestyle = '--', sub_pos = n)
mplt.lineplot(limits,'import limit',label = 'import limit',color = limits_color_dict,linestyle = '--', sub_pos = n)
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_int),name = 'Scenario')
single_int_out = single_int.set_index([scenario_names],append = True)
chunks_interf.append(single_int_out)
Data_out_line = pd.concat(chunks_interf,axis = 0)
Data_out_line.columns = [interf]
chunks.append(Data_out_line)
else:
self.logger.warning(interf + ' not found in results. Have you tagged it with the "Must Report" property in PLEXOS?')
excess_axs += 1
missing_ints += 1
continue
axs[n].set_title(interf)
handles, labels = axs[n].get_legend_handles_labels()
if not duration_curve:
mplt.set_subplot_timeseries_format(sub_pos=n)
if n == len(interf_list) - 1:
axs[n].legend(loc='lower left',bbox_to_anchor=(1.05,-0.2))
if missing_ints == len(interf_list):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
Data_Table_Out = Data_Table_Out.reset_index()
index_name = 'level_0' if duration_curve else 'timestamp'
Data_Table_Out = Data_Table_Out.pivot(index = index_name,columns = 'Scenario')
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
# Data_Table_Out = Data_Table_Out.reset_index()
# Data_Table_Out = Data_Table_Out.groupby(Data_Table_Out.index // 24).mean()
# Data_Table_Out.index = pd.date_range(start = '1/1/2024',end = '12/31/2024',freq = 'D')
plt.ylabel('Flow (GW)', color='black', rotation='vertical', labelpad=30)
if duration_curve:
plt.xlabel('Sorted hour of the year', color = 'black', labelpad = 30)
plt.tight_layout(rect=[0, 0.03, 1, 0.97])
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
#Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Interface_Limits.csv'))
return outputs
def line_flow_ind(self, figure_name: str = None, prop: str = None, **_):
"""
#TODO: Finish Docstring
This method plots flow, import and export limit, for individual transmission lines,
with a facet for each line.
The lines are specified in the plot properties field of Marmot_plot_select.csv (column 4).
The plot includes every interchange that originates or ends in the aggregation zone.
Figures and data tables are returned to plot_main
Args:
figure_name (str, optional): [description]. Defaults to None.
prop (str, optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
#TODO: Use auto unit converter in method
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios),
(True,"line_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
#Select only lines specified in Marmot_plot_select.csv.
if pd.isna(prop) or prop == '':
return InputSheetError()
select_lines = prop.split(",")
self.logger.info('Plotting only lines specified in Marmot_plot_select.csv')
self.logger.info(select_lines)
scenario = self.Scenarios[0] #Select single scenario for purpose of extracting limits.
export_limits = self["line_Export_Limit"].get(scenario).droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced lines.
import_limits = self["line_Import_Limit"].get(scenario).droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced lines.
flows = self["line_Flow"][scenario]
# limited_lines = []
# i = 0
# all_lines = flows.index.get_level_values('line_name').unique()
# for line in all_lines:
# i += 1
# print(line)
# print(i / len(all_lines))
# exp = export_limits.loc[line].squeeze()[0]
# imp = import_limits.loc[line].squeeze()[0]
# flow = flows.xs(line,level = 'line_name')[0].tolist()
# if exp in flow or imp in flow:
# limited_lines.append(line)
# print(limited_lines)
# pd.DataFrame(limited_lines).to_csv('/Users/mschwarz/OR OSW local/Solutions/Figures_Output/limited_lines.csv')
xdim,ydim = self.set_x_y_dimension(len(select_lines))
grid_size = xdim * ydim
excess_axs = grid_size - len(select_lines)
mplt = PlotLibrary(ydim, xdim, squeeze=False,
ravel_axs=True)
fig, axs = mplt.get_figure()
reported_lines = self["line_Flow"][self.Scenarios[0]].index.get_level_values('line_name').unique()
n = -1
missing_lines = 0
chunks = []
limits_chunks = []
for line in select_lines:
n += 1
#Remove leading spaces
if line[0] == ' ':
line = line[1:]
if line in reported_lines:
chunks_line = []
single_exp_lim = export_limits.loc[line]
single_imp_lim = import_limits.loc[line]
limits = pd.concat([single_exp_lim,single_imp_lim])
limits_chunks.append(limits)
single_exp_lim = single_exp_lim.squeeze()
single_imp_lim = single_imp_lim.squeeze()
# If export/import limits were pulled as an interval property, take the average.
if len(single_exp_lim) > 1:
single_exp_lim = single_exp_lim.mean()
single_imp_lim = single_imp_lim.mean()
limits = pd.Series([single_exp_lim,single_imp_lim],name = line)
limits_chunks.append(limits)
for scenario in self.Scenarios:
flow = self["line_Flow"][scenario]
single_line = flow.xs(line,level = 'line_name')
single_line = single_line.droplevel('units')
single_line.columns = [line]
if self.shift_leapday == True:
single_line = self.adjust_for_leapday(single_line)
single_line_out = single_line.copy()
if duration_curve:
single_line = self.sort_duration(single_line,line)
mplt.lineplot(single_line, line, label = scenario + '\n line flow', sub_pos=n)
#Add %congested number to plot.
if scenario == self.Scenarios[0]:
viol_exp = single_line[single_line[line] > single_exp_lim].count()
viol_imp = single_line[single_line[line] < single_imp_lim].count()
viol_perc = 100 * (viol_exp + viol_imp) / len(single_line)
viol_perc = round(viol_perc.squeeze(),3)
axs[n].annotate('Violation = ' + str(viol_perc) + '% of hours', xy = (0.1,0.15),xycoords='axes fraction')
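# Congestion is counted as hours where the flow sits exactly at the export or import limit.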
cong_exp = single_line[single_line[line] == single_exp_lim].count()
cong_imp = single_line[single_line[line] == single_imp_lim].count()
cong_perc = 100 * (cong_exp + cong_imp) / len(single_line)
cong_perc = round(cong_perc.squeeze(),0)
axs[n].annotate('Congestion = ' + str(cong_perc) + '% of hours', xy = (0.1,0.1),xycoords='axes fraction')
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_line_out),name = 'Scenario')
single_line_out = single_line_out.set_index([scenario_names],append = True)
chunks_line.append(single_line_out)
Data_out_line = pd.concat(chunks_line,axis = 0)
chunks.append(Data_out_line)
else:
self.logger.warning(line + ' not found in results. Have you tagged it with the "Must Report" property in PLEXOS?')
excess_axs += 1
missing_lines += 1
continue
mplt.remove_excess_axs(excess_axs,grid_size)
axs[n].axhline(y = single_exp_lim, ls = '--',label = 'Export Limit',color = 'red')
axs[n].axhline(y = single_imp_lim, ls = '--',label = 'Import Limit', color = 'green')
axs[n].set_title(line)
if not duration_curve:
mplt.set_subplot_timeseries_format(sub_pos=n)
if missing_lines == len(select_lines):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
mplt.add_legend()
plt.ylabel('Flow (MW)', color='black', rotation='vertical', labelpad=30)
#plt.tight_layout(rect=[0, 0.03, 1, 0.97])
plt.tight_layout()
fn_suffix = '_duration_curve' if duration_curve else ''
fig.savefig(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + fn_suffix + '.svg'), dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + fn_suffix + '.csv'))
# Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + 'limits.csv'))
outputs = DataSavedInModule()
return outputs
def line_flow_ind_diff(self, figure_name: str = None,
prop: str = None, **_):
"""
#TODO: Finish Docstring
This method plots the flow difference for individual transmission lines, with a facet for each line.
The scenarios are specified in the "Scenario_Diff_plot" field of Marmot_user_defined_inputs.csv.
The lines are specified in the plot properties field of Marmot_plot_select.csv (column 4).
Figures and data tables are saved in the module.
Args:
figure_name (str, optional): [description]. Defaults to None.
prop (str, optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
#TODO: Use auto unit converter in method
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
outputs = MissingInputData()
return outputs
#Select only lines specified in Marmot_plot_select.csv.
if pd.isna(prop) or prop == '':
outputs = InputSheetError()
return outputs
select_lines = prop.split(",")
self.logger.info('Plotting only lines specified in Marmot_plot_select.csv')
self.logger.info(select_lines)
flow_diff = self["line_Flow"].get(self.Scenario_Diff[1]) - self["line_Flow"].get(self.Scenario_Diff[0])
xdim,ydim = self.set_x_y_dimension(len(select_lines))
grid_size = xdim * ydim
excess_axs = grid_size - len(select_lines)
mplt = PlotLibrary(ydim, xdim, squeeze=False,
ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.05, hspace=0.2)
reported_lines = self["line_Flow"].get(self.Scenarios[0]).index.get_level_values('line_name').unique()
n = -1
missing_lines = 0
chunks = []
for line in select_lines:
n += 1
#Remove leading spaces
if line[0] == ' ':
line = line[1:]
if line in reported_lines:
single_line = flow_diff.xs(line,level = 'line_name')
single_line.columns = [line]
if self.shift_leapday == True:
single_line = self.adjust_for_leapday(single_line)
single_line_out = single_line.copy()
if duration_curve:
single_line = self.sort_duration(single_line,line)
#mplt.lineplot(single_line,line, label = self.Scenario_Diff[1] + ' - \n' + self.Scenario_Diff[0] + '\n line flow', sub_pos = n)
mplt.lineplot(single_line,line, label = 'BESS - no BESS \n line flow', sub_pos=n)
else:
self.logger.warning(line + ' not found in results. Have you tagged it with the "Must Report" property in PLEXOS?')
excess_axs += 1
missing_lines += 1
continue
mplt.remove_excess_axs(excess_axs,grid_size)
axs[n].set_title(line)
if not duration_curve:
mplt.set_subplot_timeseries_format(sub_pos=n)
chunks.append(single_line_out)
if missing_lines == len(select_lines):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
mplt.add_legend()
plt.ylabel('Flow difference (MW)', color='black', rotation='vertical', labelpad=30)
plt.tight_layout()
fn_suffix = '_duration_curve' if duration_curve else ''
fig.savefig(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + fn_suffix + '.svg'), dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission',figure_name + fn_suffix + '.csv'))
outputs = DataSavedInModule()
return outputs
def line_flow_ind_seasonal(self, figure_name: str = None, prop: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""TODO: Finish Docstring.
This method differs from the previous method, in that it plots seasonal line limits.
To use this method, line import/export must be an "interval" property, not a "year" property.
This can be selected in "plexos_properties.csv".
Re-run the formatter if necessary, it will overwrite the existing properties in "*_formatted.h5"
This method plots flow, import and export limit, for individual transmission lines, with a facet for each line.
The lines are specified in the plot properties field of Marmot_plot_select.csv (column 4).
The plot includes every interchange that originates or ends in the aggregation zone.
Figures and data tables saved in the module.
Args:
figure_name (str, optional): [description]. Defaults to None.
prop (str, optional): [description]. Defaults to None.
start_date_range (str, optional): [description]. Defaults to None.
end_date_range (str, optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
#TODO: Use auto unit converter in method
if pd.isna(start_date_range):
self.logger.warning('You are attempting to plot a time series facetted by two seasons,\n\
but you are missing a value in the "Start Date" column of "Marmot_plot_select.csv" \
Please enter dates in "Start Date" and "End Date". These will define the bounds of \
one of your two seasons. The other season will be comprised of the rest of the year.')
return MissingInputData()
duration_curve=False
if 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios),
(True,"line_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
#Select only lines specified in Marmot_plot_select.csv.
if pd.isna(prop) or prop == '':
return InputSheetError()
select_lines = prop.split(",")
self.logger.info('Plotting only lines specified in Marmot_plot_select.csv')
self.logger.info(select_lines)
scenario = self.Scenarios[0]
#Line limits are seasonal.
export_limits = self["line_Export_Limit"].get(scenario).droplevel('timestamp')
export_limits.mask(export_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
export_limits = export_limits[export_limits[0].abs() < 99998] #Filter out unenforced lines.
import_limits = self["line_Import_Limit"].get(scenario).droplevel('timestamp')
import_limits.mask(import_limits[0]==0.0,other=0.01,inplace=True) #if limit is zero set to small value
import_limits = import_limits[import_limits[0].abs() < 99998] #Filter out unenforced lines.
#Extract time index
ti = self["line_Flow"][self.Scenarios[0]].index.get_level_values('timestamp').unique()
reported_lines = self["line_Flow"][self.Scenarios[0]].index.get_level_values('line_name').unique()
self.logger.info('Carving out season from ' + start_date_range + ' to ' + end_date_range)
#Remove missing lines from the list.
#Build a new list rather than removing items while iterating over select_lines.
cleaned_select_lines = []
for line in select_lines:
#Remove leading spaces
line = line.lstrip()
if line not in reported_lines:
self.logger.warning(line + ' not found in results.')
else:
cleaned_select_lines.append(line)
select_lines = cleaned_select_lines
if not select_lines:
outputs = MissingInputData()
return outputs
xdim = 2
ydim = len(select_lines)
grid_size = xdim * ydim
excess_axs = grid_size - len(select_lines)
mplt = PlotLibrary(ydim, xdim, squeeze=False)
fig, axs = mplt.get_figure()
i = -1
missing_lines = 0
chunks = []
limits_chunks = []
for line in select_lines:
i += 1
#Remove leading spaces
if line[0] == ' ':
line = line[1:]
chunks_line = []
single_exp_lim = export_limits.loc[line]
single_exp_lim.index = ti
single_imp_lim = import_limits.loc[line]
single_imp_lim.index = ti
limits = pd.concat([single_exp_lim,single_imp_lim],axis = 1)
limits.columns = ['export limit','import limit']
limits.index = ti
limits_chunks.append(limits)
for scenario in self.Scenarios:
flow = self["line_Flow"][scenario]
single_line = flow.xs(line,level = 'line_name')
single_line = single_line.droplevel('units')
single_line_out = single_line.copy()
single_line.columns = [line]
if self.shift_leapday == True:
single_line = self.adjust_for_leapday(single_line)
#Split into seasons.
summer = single_line[start_date_range : end_date_range]
winter = single_line.drop(summer.index)
summer_lim = limits[start_date_range:end_date_range]
winter_lim = limits.drop(summer.index)
if duration_curve:
summer = self.sort_duration(summer,line)
winter = self.sort_duration(winter,line)
summer_lim = self.sort_duration(summer_lim,'export limit')
winter_lim = self.sort_duration(winter_lim,'export limit')
axs[i,0].plot(summer[line],linewidth = 1,label = scenario + '\n line flow')
axs[i,1].plot(winter[line],linewidth = 1,label = scenario + '\n line flow')
if scenario == self.Scenarios[-1]:
for col in summer_lim:
limits_color_dict = {'export limit': 'red', 'import limit': 'green'}
axs[i,0].plot(summer_lim[col],linewidth = 1,linestyle = '--',color = limits_color_dict[col],label = col)
axs[i,1].plot(winter_lim[col],linewidth = 1,linestyle = '--',color = limits_color_dict[col],label = col)
for j in [0,1]:
axs[i,j].spines['right'].set_visible(False)
axs[i,j].spines['top'].set_visible(False)
axs[i,j].tick_params(axis='y', which='major', length=5, width=1)
axs[i,j].tick_params(axis='x', which='major', length=5, width=1)
axs[i,j].set_title(line)
if i == len(select_lines) - 1:
axs[i,j].legend(loc = 'lower left',bbox_to_anchor=(1.05,0),facecolor='inherit', frameon=True)
#For output time series .csv
scenario_names = pd.Series([scenario] * len(single_line_out),name = 'Scenario')
single_line_out.columns = [line]
single_line_out = single_line_out.set_index([scenario_names],append = True)
chunks_line.append(single_line_out)
Data_out_line = pd.concat(chunks_line,axis = 0)
chunks.append(Data_out_line)
if missing_lines == len(select_lines):
outputs = MissingInputData()
return outputs
Data_Table_Out = pd.concat(chunks,axis = 1)
#Limits_Out = pd.concat(limits_chunks,axis = 1)
#Limits_Out.index = ['Export Limit','Import Limit']
fig.text(0.3,1,'Summer (' + start_date_range + ' to ' + end_date_range + ')')
fig.text(0.6,1,'Winter (rest of year)')
plt.ylabel('Flow (MW)', color='black', rotation='vertical', labelpad=30)
plt.tight_layout()
fn_suffix = '_duration_curve' if duration_curve else ''
fig.savefig(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Line_Flow' + fn_suffix + '_seasonal.svg'), dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Line_Flow' + fn_suffix + '_seasonal.csv'))
#Limits_Out.to_csv(os.path.join(self.Marmot_Solutions_folder, 'Figures_Output',self.AGG_BY + '_transmission','Individual_Line_Limits.csv'))
outputs = DataSavedInModule()
return outputs
def extract_tx_cap(self, **_):
"""Plot under development
Returns:
UnderDevelopment(): Exception class, plot is not functional.
"""
return UnderDevelopment() #TODO: Needs finishing
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"interface_Import_Limit",self.Scenarios),
(True,"interface_Export_Limit",self.Scenarios),
(True,"line_Import_Limit",self.Scenarios),
(True,"line_Export_Limit",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for scenario in self.Scenarios:
self.logger.info(scenario)
for zone_input in self.Zones:
#Lines
# lines = self.meta.region_interregionallines(scenario)
# if scenario == 'ADS':
# zone_input = zone_input.split('_WI')[0]
# lines = self.meta_ADS.region_interregionallines()
# lines = lines[lines['region'] == zone_input]
# import_lim = self["line_Import_Limit"][scenario].reset_index()
# export_lim = self["line_Export_Limit"][scenario].reset_index()
# lines = lines.merge(import_lim,how = 'inner',on = 'line_name')
# lines = lines[['line_name',0]]
# lines.columns = ['line_name','import_limit']
# lines = lines.merge(export_lim, how = 'inner',on = 'line_name')
# lines = lines[['line_name','import_limit',0]]
# lines.columns = ['line_name','import_limit','export_limit']
# fn = os.path.join(self.Marmot_Solutions_folder, 'NARIS', 'Figures_Output',self.AGG_BY + '_transmission','Individual_Interregional_Line_Limits_' + scenario + '.csv')
# lines.to_csv(fn)
# lines = self.meta.region_intraregionallines(scenario)
# if scenario == 'ADS':
# lines = self.meta_ADS.region_intraregionallines()
# lines = lines[lines['region'] == zone_input]
# import_lim = self["line_Import_Limit"][scenario].reset_index()
# export_lim = self["line_Export_Limit"][scenario].reset_index()
# lines = lines.merge(import_lim,how = 'inner',on = 'line_name')
# lines = lines[['line_name',0]]
# lines.columns = ['line_name','import_limit']
# lines = lines.merge(export_lim, how = 'inner',on = 'line_name')
# lines = lines[['line_name','import_limit',0]]
# lines.columns = ['line_name','import_limit','export_limit']
# fn = os.path.join(self.Marmot_Solutions_folder, 'NARIS', 'Figures_Output',self.AGG_BY + '_transmission','Individual_Intraregional_Line_Limits_' + scenario + '.csv')
# lines.to_csv(fn)
#Interfaces
PSCo_ints = ['P39 TOT 5_WI','P40 TOT 7_WI']
int_import_lim = self["interface_Import_Limit"][scenario].reset_index()
int_export_lim = self["interface_Export_Limit"][scenario].reset_index()
if scenario == 'NARIS':
last_timestamp = int_import_lim['timestamp'].unique()[-1] #Last because ADS uses the last timestamp.
int_import_lim = int_import_lim[int_import_lim['timestamp'] == last_timestamp]
int_export_lim = int_export_lim[int_export_lim['timestamp'] == last_timestamp]
lines2ints = self.meta_ADS.interface_lines()
else:
lines2ints = self.meta.interface_lines(scenario)
fn = os.path.join(self.Marmot_Solutions_folder, 'NARIS', 'Figures_Output',self.AGG_BY + '_transmission','test_meta_' + scenario + '.csv')
lines2ints.to_csv(fn)
ints = pd.merge(int_import_lim,int_export_lim,how = 'inner', on = 'interface_name')
ints.rename(columns = {'0_x':'import_limit','0_y': 'export_limit'},inplace = True)
all_lines_in_ints = lines2ints['line'].unique()
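# NOTE: 'lines' is only defined in the commented-out blocks above, so the check below
# is a development leftover and would fail with a NameError if executed as written.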
test = [line for line in lines['line_name'].unique() if line in all_lines_in_ints]
ints = ints.merge(lines2ints, how = 'inner', left_on = 'interface_name',right_on = 'interface')
def region_region_interchange_all_scenarios(self, **kwargs):
"""
#TODO: Finish Docstring
This method creates a timeseries line plot of interchange flows between the selected region
to each connecting region.
If there are more than 4 total interchanges, all other interchanges are aggregated into an 'other' grouping
Each scenario is plotted on a separate facet plot.
Figures and data tables are returned to plot_main
"""
outputs = self._region_region_interchange(self.Scenarios, **kwargs)
return outputs
def region_region_interchange_all_regions(self, **kwargs):
"""
#TODO: Finish Docstring
This method creates a timeseries line plot of interchange flows between the selected region
to each connecting region. All regions are plotted on a single figure with each focus region placed on a separate
facet plot
If there are more than 4 total interchanges, all other interchanges are aggregated into an 'other' grouping
This figure only plots a single scenario that is defined by Main_scenario_plot in user_defined_inputs.csv.
Figures and data tables are saved within method
"""
outputs = self._region_region_interchange([self.Scenarios[0]],plot_scenario=False, **kwargs)
return outputs
def _region_region_interchange(self, scenario_type: str, plot_scenario: bool = True,
timezone: str = "", **_):
"""#TODO: Finish Docstring
Args:
scenario_type (str): [description]
plot_scenario (bool, optional): [description]. Defaults to True.
timezone (str, optional): [description]. Defaults to "".
Returns:
[type]: [description]
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"{agg}_{agg}s_Net_Interchange",scenario_type)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"Zone = {zone_input}")
ncols, nrows = self.set_facet_col_row_dimensions(multi_scenario=scenario_type)
mplt = PlotLibrary(nrows, ncols, sharey=True,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.6, hspace=0.3)
data_table_chunks=[]
n=0
for scenario in scenario_type:
rr_int = self[f"{agg}_{agg}s_Net_Interchange"].get(scenario)
if self.shift_leapday == True:
rr_int = self.adjust_for_leapday(rr_int)
# For plot_main handling - need to find a better solution
if plot_scenario == False:
outputs={}
for zone_input in self.Zones:
outputs[zone_input] = pd.DataFrame()
if self.AGG_BY != 'region' and self.AGG_BY != 'zone':
agg_region_mapping = self.Region_Mapping[['region',self.AGG_BY]].set_index('region').to_dict()[self.AGG_BY]
# Checks if keys all aggregate to a single value, this plot requires multiple values to work
if len(set(agg_region_mapping.values())) == 1:
return UnsupportedAggregation()
rr_int = rr_int.reset_index()
rr_int['parent'] = rr_int['parent'].map(agg_region_mapping)
rr_int['child'] = rr_int['child'].map(agg_region_mapping)
rr_int_agg = rr_int.groupby(['timestamp','parent','child'],as_index=True).sum()
rr_int_agg.rename(columns = {0:'flow (MW)'}, inplace = True)
rr_int_agg = rr_int_agg.reset_index()
# If plotting all regions update plot setup
if plot_scenario == False:
#Make a facet plot, one panel for each parent zone.
parent_region = rr_int_agg['parent'].unique()
plot_number = len(parent_region)
ncols, nrows = self.set_x_y_dimension(plot_number)
mplt = PlotLibrary(nrows, ncols,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.6, hspace=0.7)
else:
parent_region = [zone_input]
plot_number = len(scenario_type)
grid_size = ncols*nrows
excess_axs = grid_size - plot_number
for parent in parent_region:
single_parent = rr_int_agg[rr_int_agg['parent'] == parent]
single_parent = single_parent.pivot(index = 'timestamp',columns = 'child',values = 'flow (MW)')
single_parent = single_parent.loc[:,(single_parent != 0).any(axis = 0)] #Remove all 0 columns (uninteresting).
if (parent in single_parent.columns):
single_parent = single_parent.drop(columns = [parent]) #Remove columns if parent = child
#Neaten up lines: if more than 4 total interchanges, aggregate all but the highest 3.
if len(single_parent.columns) > 4:
# Set the "three highest zonal interchanges" for all three scenarios.
cols_dontagg = single_parent.max().abs().sort_values(ascending = False)[0:3].index
df_dontagg = single_parent[cols_dontagg]
df_toagg = single_parent.drop(columns = cols_dontagg)
agged = df_toagg.sum(axis = 1)
df_dontagg.insert(len(df_dontagg.columns),'Other',agged)
single_parent = df_dontagg.copy()
#Convert units
if n == 0:
unitconversion = self.capacity_energy_unitconversion(single_parent)
single_parent = single_parent / unitconversion['divisor']
for column in single_parent.columns:
mplt.lineplot(single_parent, column, label=column, sub_pos=n)
axs[n].set_title(parent)
axs[n].margins(x=0.01)
mplt.set_subplot_timeseries_format(sub_pos=n)
axs[n].hlines(y = 0, xmin = axs[n].get_xlim()[0], xmax = axs[n].get_xlim()[1], linestyle = ':') #Add horizontal line at 0.
axs[n].legend(loc='lower left',bbox_to_anchor=(1,0))
n+=1
# Create data table for each scenario
scenario_names = pd.Series([scenario]*len(single_parent),name='Scenario')
data_table = single_parent.add_suffix(f" ({unitconversion['units']})")
data_table = data_table.set_index([scenario_names],append=True)
data_table_chunks.append(data_table)
# if plotting all scenarios add facet labels
if plot_scenario == True:
mplt.add_facet_labels(xlabels=self.xlabels,
ylabels = self.ylabels)
#Remove extra axes
mplt.remove_excess_axs(excess_axs, grid_size)
plt.xlabel(timezone, color='black', rotation='horizontal',labelpad = 30)
plt.ylabel(f"Net Interchange ({unitconversion['units']})", color='black', rotation='vertical', labelpad = 40)
# If plotting all regions, save outputs and return DataSavedInModule to plot_main
if plot_scenario == False:
# Location to save to
Data_Table_Out = rr_int_agg
save_figures = os.path.join(self.figure_folder, self.AGG_BY + '_transmission')
fig.savefig(os.path.join(save_figures, "Region_Region_Interchange_{}.svg".format(self.Scenarios[0])), dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(save_figures, "Region_Region_Interchange_{}.csv".format(self.Scenarios[0])))
outputs = DataSavedInModule()
return outputs
Data_Out = pd.concat(data_table_chunks, copy=False, axis=0)
# if plotting all scenarios return figures to plot_main
outputs[zone_input] = {'fig': fig,'data_table':Data_Out}
return outputs
def region_region_checkerboard(self, **_):
"""Creates a checkerboard/heatmap figure showing total interchanges between regions/zones.
Each scenario is plotted on its own facet plot.
Plots and Data are saved within the module.
Returns:
DataSavedInModule: DataSavedInModule exception.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"{agg}_{agg}s_Net_Interchange",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
ncols, nrows = self.set_x_y_dimension(len(self.Scenarios))
grid_size = ncols*nrows
excess_axs = grid_size - len(self.Scenarios)
mplt = PlotLibrary(nrows, ncols,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.02, hspace=0.4)
max_flow_group = []
Data_Out = []
n=0
for scenario in self.Scenarios:
rr_int = self[f"{agg}_{agg}s_Net_Interchange"].get(scenario)
if self.shift_leapday == True:
rr_int = self.adjust_for_leapday(rr_int)
if self.AGG_BY != 'region' and self.AGG_BY != 'zone':
agg_region_mapping = self.Region_Mapping[['region',self.AGG_BY]].set_index('region').to_dict()[self.AGG_BY]
# Checks if keys all aggregate to a single value, this plot requires multiple values to work
if len(set(agg_region_mapping.values())) == 1:
return UnsupportedAggregation()
rr_int = rr_int.reset_index()
rr_int['parent'] = rr_int['parent'].map(agg_region_mapping)
rr_int['child'] = rr_int['child'].map(agg_region_mapping)
rr_int_agg = rr_int.groupby(['parent','child'],as_index=True).sum()
rr_int_agg.rename(columns = {0:'flow (MW)'}, inplace = True)
rr_int_agg=rr_int_agg.loc[rr_int_agg['flow (MW)']>0.01] # Keep only positive flows
rr_int_agg = rr_int_agg.sort_values(ascending=False, by='flow (MW)')
rr_int_agg = rr_int_agg/1000 # MWh -> GWh
data_out = rr_int_agg.copy()
data_out.rename(columns={'flow (MW)':'{} flow (GWh)'.format(scenario)},inplace=True)
max_flow = max(rr_int_agg['flow (MW)'])
rr_int_agg = rr_int_agg.unstack('child')
rr_int_agg = rr_int_agg.droplevel(level = 0, axis = 1)
current_cmap = plt.cm.get_cmap()
current_cmap.set_bad(color='grey')
axs[n].imshow(rr_int_agg)
axs[n].set_xticks(np.arange(rr_int_agg.shape[1]))
axs[n].set_yticks(np.arange(rr_int_agg.shape[0]))
axs[n].set_xticklabels(rr_int_agg.columns)
axs[n].set_yticklabels(rr_int_agg.index)
axs[n].set_title(scenario.replace('_',' '),fontweight='bold')
# Rotate the tick labels and set their alignment.
plt.setp(axs[n].get_xticklabels(), rotation=30, ha="right",
rotation_mode="anchor")
#Delineate the boxes and make room at top and bottom
axs[n].set_xticks(np.arange(rr_int_agg.shape[1]+1)-.5, minor=True)
axs[n].set_yticks(np.arange(rr_int_agg.shape[0]+1)-.5, minor=True)
axs[n].grid(which="minor", color="k", linestyle='-', linewidth=1)
axs[n].tick_params(which="minor", bottom=False, left=False)
max_flow_group.append(max_flow)
Data_Out.append(data_out)
n+=1
#Remove extra axes
mplt.remove_excess_axs(excess_axs,grid_size)
cmap = cm.inferno
norm = mcolors.Normalize(vmin=0, vmax=max(max_flow_group))
cax = plt.axes([0.90, 0.1, 0.035, 0.8])
fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap),
cax=cax, label='Total Net Interchange [GWh]')
plt.xlabel('To Region', color='black', rotation='horizontal',
labelpad=40)
plt.ylabel('From Region', color='black', rotation='vertical',
labelpad=40)
Data_Table_Out = pd.concat(Data_Out,axis=1)
save_figures = os.path.join(self.figure_folder, f"{self.AGG_BY}_transmission")
fig.savefig(os.path.join(save_figures, "region_region_checkerboard.svg"),
dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(save_figures, "region_region_checkerboard.csv"))
outputs = DataSavedInModule()
return outputs
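# --- Illustrative sketch (standalone, not part of the original module) ---
# Minimal reproduction of the checkerboard technique used above: sum the
# parent->child flows, pivot children into columns with unstack(), and render
# the matrix with imshow(). All data below is invented.
import pandas as pd
import matplotlib.pyplot as plt

flows = pd.DataFrame({
    'parent': ['A', 'A', 'B', 'B'],
    'child': ['B', 'C', 'A', 'C'],
    'flow (MW)': [10.0, 5.0, 2.0, 7.0],
})
matrix = (flows.groupby(['parent', 'child'])['flow (MW)']
          .sum()
          .unstack('child'))  # parents as rows, children as columns, NaN where no link
fig, ax = plt.subplots()
ax.imshow(matrix)
ax.set_xticks(range(matrix.shape[1]))
ax.set_xticklabels(matrix.columns)
ax.set_yticks(range(matrix.shape[0]))
ax.set_yticklabels(matrix.index)
# --------------------------------------------------------------------------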
def line_violations_timeseries(self, **kwargs):
"""Creates a timeseries line plot of lineflow violations for each region.
The magnitude of each violation is plotted on the y-axis.
Each scenario is plotted as a separate line.
This method calls _violations() to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._violations(**kwargs)
return outputs
def line_violations_totals(self, **kwargs):
"""Creates a barplot of total lineflow violations for each region.
Each scenario is plotted as a separate bar.
This method calls _violations() and passes the total_violations=True argument
to create the figure.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = self._violations(total_violations=True, **kwargs)
return outputs
def _violations(self, total_violations: bool = False,
timezone: str = "",
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates line violation plots, line plot and barplots
This methods is called from line_violations_timeseries() and line_violations_totals()
Args:
total_violations (bool, optional): If True finds the sum of violations.
Used to create barplots. Defaults to False.
timezone (str, optional): The timezone to display on the x-axes.
Defaults to "".
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Violation",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f'Zone = {zone_input}')
all_scenarios = pd.DataFrame()
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {str(scenario)}")
if self.AGG_BY == 'zone':
lines = self.meta.zone_lines(scenario)
else:
lines = self.meta.region_lines(scenario)
line_v = self["line_Violation"].get(scenario)
line_v = line_v.reset_index()
viol = line_v.merge(lines,on = 'line_name',how = 'left')
if self.AGG_BY == 'zone':
viol = viol.groupby(["timestamp", "zone"]).sum()
else:
viol = viol.groupby(["timestamp", self.AGG_BY]).sum()
one_zone = viol.xs(zone_input, level = self.AGG_BY)
one_zone = one_zone.rename(columns = {0 : scenario})
one_zone = one_zone.abs() #We don't care about the direction of the violation
all_scenarios = pd.concat([all_scenarios,one_zone], axis = 1)
all_scenarios.columns = all_scenarios.columns.str.replace('_',' ')
#remove columns that are all equal to 0
all_scenarios = all_scenarios.loc[:, (all_scenarios != 0).any(axis=0)]
if all_scenarios.empty:
outputs[zone_input] = MissingZoneData()
continue
unitconversion = self.capacity_energy_unitconversion(all_scenarios)
all_scenarios = all_scenarios/unitconversion['divisor']
Data_Table_Out = all_scenarios.add_suffix(f" ({unitconversion['units']})")
#Make scenario/color dictionary.
color_dict = dict(zip(all_scenarios.columns,self.color_list))
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
if total_violations==True:
all_scenarios_tot = all_scenarios.sum()
all_scenarios_tot.plot.bar(stacked=False, rot=0,
color=[color_dict.get(x, '#333333') for x in all_scenarios_tot.index],
linewidth=0.1, width=0.35, ax=ax)
else:
for column in all_scenarios:
mplt.lineplot(all_scenarios,column,color=color_dict,label=column)
ax.margins(x=0.01)
mplt.set_subplot_timeseries_format(minticks=6,maxticks=12)
ax.set_xlabel(timezone, color='black', rotation='horizontal')
mplt.add_legend()
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
ax.set_ylabel(f"Line violations ({unitconversion['units']})", color='black', rotation='vertical')
outputs[zone_input] = {'fig': fig,'data_table':Data_Table_Out}
return outputs
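# --- Illustrative sketch (standalone, not part of the original module) ---
# Minimal reproduction of the aggregation inside _violations(): join per-line
# violations to a line->region mapping, sum per timestamp and region, then take
# absolute values because only the magnitude of a violation matters here.
# All data below is invented.
import pandas as pd

violations = pd.DataFrame({
    'timestamp': pd.to_datetime(['2024-01-01 00:00', '2024-01-01 00:00']),
    'line_name': ['line_1', 'line_2'],
    0: [-25.0, 40.0],                     # raw violation values, signed
})
line_map = pd.DataFrame({'line_name': ['line_1', 'line_2'],
                         'region': ['North', 'South']})
merged = violations.merge(line_map, on='line_name', how='left')
per_region = merged.groupby(['timestamp', 'region'])[0].sum().abs()
print(per_region)
# --------------------------------------------------------------------------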
def net_export(self, timezone: str = "",
start_date_range: str = None,
end_date_range: str = None, **_):
"""creates a timeseries net export line graph.
Scenarios are plotted as separate lines.
Args:
timezone (str, optional): The timezone to display on the x-axes.
Defaults to "".
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"{agg}_Net_Interchange",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
outputs = {}
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
net_export_all_scenarios = pd.DataFrame()
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
net_export_read = self[f"{agg}_Net_Interchange"].get(scenario)
if self.shift_leapday == True:
net_export_read = self.adjust_for_leapday(net_export_read)
net_export = net_export_read.xs(zone_input, level = self.AGG_BY)
net_export = net_export.groupby("timestamp").sum()
net_export.columns = [scenario]
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
net_export = net_export[start_date_range : end_date_range]
net_export_all_scenarios = pd.concat([net_export_all_scenarios,net_export], axis = 1)
net_export_all_scenarios.columns = net_export_all_scenarios.columns.str.replace('_', ' ')
unitconversion = self.capacity_energy_unitconversion(net_export_all_scenarios)
net_export_all_scenarios = net_export_all_scenarios/unitconversion["divisor"]
# Data table of values to return to main program
Data_Table_Out = net_export_all_scenarios.add_suffix(f" ({unitconversion['units']})")
#Make scenario/color dictionary.
color_dict = dict(zip(net_export_all_scenarios.columns,self.color_list))
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
plt.subplots_adjust(wspace=0.05, hspace=0.2)
if net_export_all_scenarios.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
for column in net_export_all_scenarios:
mplt.lineplot(net_export_all_scenarios,column,color_dict, label=column)
ax.set_ylabel(f'Net exports ({unitconversion["units"]})', color='black',
rotation='vertical')
ax.set_xlabel(timezone, color='black', rotation='horizontal')
ax.margins(x=0.01)
ax.hlines(y=0, xmin=ax.get_xlim()[0], xmax=ax.get_xlim()[1],
linestyle=':')
mplt.set_subplot_timeseries_format()
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
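# --- Illustrative sketch (standalone, not part of the original module) ---
# The per-scenario assembly in net_export() boils down to slicing an hourly
# series with a date-range label slice and concatenating the scenario columns
# side by side. Toy example with invented data:
import pandas as pd

idx = pd.date_range('2024-01-01', periods=48, freq='H')
scen_a = pd.Series(range(48), index=idx, name='Scenario A')
scen_b = pd.Series(range(48, 0, -1), index=idx, name='Scenario B')
window = slice('2024-01-01 06:00', '2024-01-01 18:00')
combined = pd.concat([scen_a.loc[window], scen_b.loc[window]], axis=1)
print(combined.head())
# --------------------------------------------------------------------------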
def zonal_interchange(self, figure_name: str = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a line plot of the net interchange between each zone, with a facet for each zone.
The method will only work if agg_by = "zone".
The code will create either a timeseries or a duration curve depending on
whether the word 'duration_curve' appears in the figure_name.
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
if self.AGG_BY not in ["zone", "zones", "Zone", "Zones"]:
self.logger.warning("This plot only supports aggregation zone")
return UnsupportedAggregation()
duration_curve = False
if figure_name and 'duration_curve' in figure_name:
duration_curve = True
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
outputs = {}
# sets up x, y dimensions of plot
ncols, nrows = self.set_facet_col_row_dimensions(multi_scenario=self.Scenarios)
grid_size = ncols*nrows
# Used to calculate any excess axis to delete
plot_number = len(self.Scenarios)
excess_axs = grid_size - plot_number
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
mplt = PlotLibrary(nrows, ncols, sharey=True,
squeeze=False, ravel_axs=True)
fig, axs = mplt.get_figure()
plt.subplots_adjust(wspace=0.1, hspace=0.5)
net_exports_all = []
for n, scenario in enumerate(self.Scenarios):
net_exports = []
exp_lines = self.meta.zone_exporting_lines(scenario)
imp_lines = self.meta.zone_importing_lines(scenario)
if exp_lines.empty or imp_lines.empty:
return MissingMetaData()
exp_lines.columns = ['region','line_name']
imp_lines.columns = ['region','line_name']
#Find list of lines that connect each region.
exp_oz = exp_lines[exp_lines['region'] == zone_input]
imp_oz = imp_lines[imp_lines['region'] == zone_input]
other_zones = self.meta.zones(scenario).name.tolist()
try:
other_zones.remove(zone_input)
except ValueError:
self.logger.warning("Are you sure you set agg_by = zone?")
self.logger.info(f"Scenario = {str(scenario)}")
flow = self["line_Flow"][scenario].copy()
if self.shift_leapday == True:
flow = self.adjust_for_leapday(flow)
flow = flow.reset_index()
for other_zone in other_zones:
exp_other_oz = exp_lines[exp_lines['region'] == other_zone]
imp_other_oz = imp_lines[imp_lines['region'] == other_zone]
exp_pair = pd.merge(exp_oz, imp_other_oz, left_on='line_name',
right_on='line_name')
imp_pair = pd.merge(imp_oz, exp_other_oz, left_on='line_name',
right_on='line_name')
#Swap columns for importing lines
imp_pair = imp_pair.reindex(columns=['region_from', 'line_name', 'region_to'])
export = flow[flow['line_name'].isin(exp_pair['line_name'])]
imports = flow[flow['line_name'].isin(imp_pair['line_name'])]
export = export.groupby(['timestamp']).sum()
imports = imports.groupby(['timestamp']).sum()
#Check for situations where there are only exporting or importing lines for this zonal pair.
if imports.empty:
net_export = export
elif export.empty:
net_export = -imports
else:
net_export = export - imports
net_export.columns = [other_zone]
if pd.notna(start_date_range):
if other_zone == other_zones[0]:
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
net_export = net_export[start_date_range : end_date_range]
if duration_curve:
net_export = self.sort_duration(net_export,other_zone)
net_exports.append(net_export)
net_exports = pd.concat(net_exports,axis = 1)
net_exports = net_exports.dropna(axis = 'columns')
net_exports.index = pd.to_datetime(net_exports.index)
net_exports['Net export'] = net_exports.sum(axis = 1)
# unitconversion based off peak export hour, only checked once
if zone_input == self.Zones[0] and scenario == self.Scenarios[0]:
unitconversion = self.capacity_energy_unitconversion(net_exports)
net_exports = net_exports / unitconversion['divisor']
if duration_curve:
net_exports = net_exports.reset_index().drop(columns = 'index')
for column in net_exports:
linestyle = '--' if column == 'Net export' else 'solid'
mplt.lineplot(net_exports, column=column, label=column,
sub_pos=n, linestyle=linestyle)
axs[n].margins(x=0.01)
#Add horizontal line at 0.
axs[n].hlines(y=0, xmin=axs[n].get_xlim()[0], xmax=axs[n].get_xlim()[1],
linestyle=':')
if not duration_curve:
mplt.set_subplot_timeseries_format(sub_pos=n)
#Add scenario column to output table.
scenario_names = pd.Series([scenario] * len(net_exports), name='Scenario')
net_exports = net_exports.add_suffix(f" ({unitconversion['units']})")
net_exports = net_exports.set_index([scenario_names], append=True)
net_exports_all.append(net_exports)
mplt.add_facet_labels(xlabels=self.xlabels,
ylabels = self.ylabels)
mplt.add_legend()
#Remove extra axes
mplt.remove_excess_axs(excess_axs,grid_size)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
plt.ylabel(f"Net export ({unitconversion['units']})", color='black',
rotation='vertical', labelpad=40)
if duration_curve:
plt.xlabel('Sorted hour of the year', color='black', labelpad=30)
Data_Table_Out = pd.concat(net_exports_all)
# if plotting all scenarios return figures to plot_main
outputs[zone_input] = {'fig': fig,'data_table' : Data_Table_Out}
return outputs
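# --- Illustrative sketch (standalone, not part of the original module) ---
# Core idea of the zone-pair calculation above: select the lines that export
# from the focus zone to the other zone and the lines that import into it,
# sum each group per timestamp, and take exports minus imports. Invented data:
import pandas as pd

flow = pd.DataFrame({
    'timestamp': pd.to_datetime(['2024-01-01 00:00'] * 3),
    'line_name': ['l1', 'l2', 'l3'],
    0: [100.0, 30.0, 20.0],              # line flows
})
exporting_lines = ['l1']                  # focus zone -> other zone
importing_lines = ['l2', 'l3']            # other zone -> focus zone
export = flow[flow['line_name'].isin(exporting_lines)].groupby('timestamp')[0].sum()
imports = flow[flow['line_name'].isin(importing_lines)].groupby('timestamp')[0].sum()
net_export = export - imports             # positive = net flow out of the focus zone
print(net_export)
# --------------------------------------------------------------------------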
def zonal_interchange_total(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a barplot of the net interchange between each zone, separated by positive and negative flows.
The method will only work if agg_by = "zone".
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
if self.AGG_BY not in ["zone", "zones", "Zone", "Zones"]:
self.logger.warning("This plot only supports aggregation zone")
return UnsupportedAggregation()
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"line_Flow",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
outputs = {}
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
plt.subplots_adjust(wspace=0.05, hspace=0.2)
net_exports_all = []
# Holds each scenario output table
data_out_chunk = []
for n, scenario in enumerate(self.Scenarios):
exp_lines = self.meta.zone_exporting_lines(scenario)
imp_lines = self.meta.zone_importing_lines(scenario)
if exp_lines.empty or imp_lines.empty:
return MissingMetaData()
exp_lines.columns = ['region', 'line_name']
imp_lines.columns = ['region', 'line_name']
#Find list of lines that connect each region.
exp_oz = exp_lines[exp_lines['region'] == zone_input]
imp_oz = imp_lines[imp_lines['region'] == zone_input]
other_zones = self.meta.zones(scenario).name.tolist()
other_zones.remove(zone_input)
net_exports = []
self.logger.info(f"Scenario = {str(scenario)}")
flow = self["line_Flow"][scenario]
flow = flow.reset_index()
for other_zone in other_zones:
exp_other_oz = exp_lines[exp_lines['region'] == other_zone]
imp_other_oz = imp_lines[imp_lines['region'] == other_zone]
exp_pair = pd.merge(exp_oz, imp_other_oz, left_on='line_name',
right_on='line_name')
imp_pair = pd.merge(imp_oz, exp_other_oz, left_on='line_name',
right_on='line_name')
#Swap columns for importing lines
imp_pair = imp_pair.reindex(columns=['region_from', 'line_name', 'region_to'])
export = flow[flow['line_name'].isin(exp_pair['line_name'])]
imports = flow[flow['line_name'].isin(imp_pair['line_name'])]
export = export.groupby(['timestamp']).sum()
imports = imports.groupby(['timestamp']).sum()
#Check for situations where there are only exporting or importing lines for this zonal pair.
if imports.empty:
net_export = export
elif export.empty:
net_export = -imports
else:
net_export = export - imports
net_export.columns = [other_zone]
if pd.notna(start_date_range):
if other_zone == other_zones[0]:
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
net_export = net_export[start_date_range : end_date_range]
net_exports.append(net_export)
net_exports = pd.concat(net_exports, axis=1)
net_exports = net_exports.dropna(axis='columns')
net_exports.index = pd.to_datetime(net_exports.index)
net_exports['Net Export'] = net_exports.sum(axis=1)
positive = net_exports.agg(lambda x: x[x>0].sum())
negative = net_exports.agg(lambda x: x[x<0].sum())
both = pd.concat([positive, negative], axis=1)
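# --- Illustrative sketch (standalone, not part of the original module) ---
# The positive/negative split right above uses DataFrame.agg with a lambda to
# sum only the positive (or only the negative) values of each column, then
# concatenates the two results side by side. Tiny reproduction with invented
# data; the column labels are chosen here for illustration only:
import pandas as pd

net = pd.DataFrame({'ZoneA': [5.0, -2.0, 3.0], 'ZoneB': [-1.0, -4.0, 2.0]})
positive = net.agg(lambda x: x[x > 0].sum())
negative = net.agg(lambda x: x[x < 0].sum())
both = pd.concat([positive, negative], axis=1)
both.columns = ['Positive', 'Negative']
print(both)
# --------------------------------------------------------------------------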
import pandas as pd
import glob
import re
from itertools import product
populations = {
'Japan': 126_800_000,
'US' : 327_200_000,
'Germany' : 82_790_000,
'Italy' : 60_480_000,
'Spain' : 46_660_000,
'Belgium' : 11_400_000,
'Switzerland' : 8_570_000,
'Iran' : 81_160_000,
'Korea, South' : 51_470_000,
'United Kingdom' : 66_440_000,
'Netherlands' : 17_180_000,
'France' : 66_990_000
}
fnames = glob.glob(r"..\COVID-19\csse_covid_19_data\csse_covid_19_daily_reports\*.csv")
if len(fnames) == 0:
raise RuntimeError('You need the COVID-19 files (https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data)')
files = []
for f in fnames:
m = re.match(r'.*([0-9]{2})-([0-9]{2})-([0-9]{4})\.csv', f)
if m is not None:
M, D, Y = m.group(1), m.group(2), m.group(3)
files += [ ( (Y, M, D), f)]
else:
print('Ignore:', f)
countries = list(sorted(populations.keys()))
files = list(sorted(files))
labels = ['Deaths', 'Recovered', 'Confirmed']
dates = []
data_series = {}
for (c, l) in product(countries, labels):
data_series[(c, l)] = []
for YMD, f in files:
data = pd.read_csv(f)
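# --- Illustrative sketch (not part of the original script) ---------------
# The loop above is truncated after reading each daily report. One plausible
# continuation (an assumption, not the author's code) is to sum the per-country
# rows of every file into data_series. The CSSE column name changed over time,
# so the 'Country/Region' / 'Country_Region' handling below is also an assumption:
#
# for YMD, f in files:
#     data = pd.read_csv(f)
#     country_col = 'Country/Region' if 'Country/Region' in data.columns else 'Country_Region'
#     dates.append(YMD)
#     for c in countries:
#         rows = data[data[country_col] == c]
#         for l in labels:
#             data_series[(c, l)].append(rows[l].sum() if l in rows.columns else 0)
# --------------------------------------------------------------------------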
import pandas as pd
import sys
from sklearn.naive_bayes import MultinomialNB
from scipy.stats import f_oneway
from mlxtend.evaluate import cochrans_q
from mlxtend.evaluate import mcnemar
from mlxtend.evaluate import mcnemar_table
import classifier_tool as tool
if __name__ == "__main__":
f_in = sys.argv[1]
df = pd.read_json(f_in)
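# --- Illustrative sketch (not part of the original script) ---------------
# The script is cut off after loading the DataFrame. Judging only from the
# imports above (MultinomialNB, cochrans_q, mcnemar, mcnemar_table), a plausible
# continuation would train classifiers and compare their predictions, e.g.
# (all variable names below are assumptions):
#
# tb = mcnemar_table(y_target=y_true, y_model1=pred_a, y_model2=pred_b)
# chi2, p = mcnemar(ary=tb, corrected=True)
# q, p_q = cochrans_q(y_true, pred_a, pred_b, pred_c)
# --------------------------------------------------------------------------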
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2022/1/12 14:55
Desc: 东方财富网-数据中心-股东分析
https://data.eastmoney.com/gdfx/
"""
import pandas as pd
import requests
from tqdm import tqdm
def stock_gdfx_free_holding_statistics_em(date: str = "20210930") -> pd.DataFrame:
"""
东方财富网-数据中心-股东分析-股东持股统计-十大流通股东
https://data.eastmoney.com/gdfx/HoldingAnalyse.html
:param date: 报告期
:type date: str
:return: 十大流通股东
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "STATISTICS_TIMES,COOPERATION_HOLDER_MARK",
"sortTypes": "-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_COOPFREEHOLDERS_ANALYSISNEW",
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"filter": f"""(HOLDNUM_CHANGE_TYPE="001")(END_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')""",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"-",
"-",
"股东名称",
"股东类型",
"-",
"统计次数",
"公告日后涨幅统计-10个交易日-平均涨幅",
"公告日后涨幅统计-10个交易日-最大涨幅",
"公告日后涨幅统计-10个交易日-最小涨幅",
"公告日后涨幅统计-30个交易日-平均涨幅",
"公告日后涨幅统计-30个交易日-最大涨幅",
"公告日后涨幅统计-30个交易日-最小涨幅",
"公告日后涨幅统计-60个交易日-平均涨幅",
"公告日后涨幅统计-60个交易日-最大涨幅",
"公告日后涨幅统计-60个交易日-最小涨幅",
"持有个股",
]
big_df = big_df[
[
"序号",
"股东名称",
"股东类型",
"统计次数",
"公告日后涨幅统计-10个交易日-平均涨幅",
"公告日后涨幅统计-10个交易日-最大涨幅",
"公告日后涨幅统计-10个交易日-最小涨幅",
"公告日后涨幅统计-30个交易日-平均涨幅",
"公告日后涨幅统计-30个交易日-最大涨幅",
"公告日后涨幅统计-30个交易日-最小涨幅",
"公告日后涨幅统计-60个交易日-平均涨幅",
"公告日后涨幅统计-60个交易日-最大涨幅",
"公告日后涨幅统计-60个交易日-最小涨幅",
"持有个股",
]
]
big_df["统计次数"] = pd.to_numeric(big_df["统计次数"])
big_df["公告日后涨幅统计-10个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-平均涨幅"])
big_df["公告日后涨幅统计-10个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-最大涨幅"])
big_df["公告日后涨幅统计-10个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-最小涨幅"])
big_df["公告日后涨幅统计-30个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-平均涨幅"])
big_df["公告日后涨幅统计-30个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-最大涨幅"])
big_df["公告日后涨幅统计-30个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-最小涨幅"])
big_df["公告日后涨幅统计-60个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-平均涨幅"])
big_df["公告日后涨幅统计-60个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-最大涨幅"])
big_df["公告日后涨幅统计-60个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-最小涨幅"])
return big_df
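# --- Illustrative sketch (not part of the original module) ---------------
# The function above walks every result page and stacks the rows with
# DataFrame.append, which is deprecated in recent pandas. A generic version of
# the same pagination pattern using pd.concat might look like this (the helper
# name and the fetch_page callable are assumptions for illustration only):
#
# def collect_pages(fetch_page, total_page: int) -> pd.DataFrame:
#     chunks = []
#     for page in range(1, total_page + 1):
#         records = fetch_page(page)        # expected to return a list of dicts
#         chunks.append(pd.DataFrame(records))
#     return pd.concat(chunks, ignore_index=True)
# --------------------------------------------------------------------------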
def stock_gdfx_holding_statistics_em(date: str = "20210930") -> pd.DataFrame:
"""
东方财富网-数据中心-股东分析-股东持股统计-十大股东
https://data.eastmoney.com/gdfx/HoldingAnalyse.html
:param date: 报告期
:type date: str
:return: 十大股东
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "STATISTICS_TIMES,COOPERATION_HOLDER_MARK",
"sortTypes": "-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_COOPHOLDERS_ANALYSIS",
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"filter": f"""(HOLDNUM_CHANGE_TYPE="001")(END_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')""",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"-",
"-",
"股东名称",
"股东类型",
"-",
"统计次数",
"公告日后涨幅统计-10个交易日-平均涨幅",
"公告日后涨幅统计-10个交易日-最大涨幅",
"公告日后涨幅统计-10个交易日-最小涨幅",
"公告日后涨幅统计-30个交易日-平均涨幅",
"公告日后涨幅统计-30个交易日-最大涨幅",
"公告日后涨幅统计-30个交易日-最小涨幅",
"公告日后涨幅统计-60个交易日-平均涨幅",
"公告日后涨幅统计-60个交易日-最大涨幅",
"公告日后涨幅统计-60个交易日-最小涨幅",
"持有个股",
]
big_df = big_df[
[
"序号",
"股东名称",
"股东类型",
"统计次数",
"公告日后涨幅统计-10个交易日-平均涨幅",
"公告日后涨幅统计-10个交易日-最大涨幅",
"公告日后涨幅统计-10个交易日-最小涨幅",
"公告日后涨幅统计-30个交易日-平均涨幅",
"公告日后涨幅统计-30个交易日-最大涨幅",
"公告日后涨幅统计-30个交易日-最小涨幅",
"公告日后涨幅统计-60个交易日-平均涨幅",
"公告日后涨幅统计-60个交易日-最大涨幅",
"公告日后涨幅统计-60个交易日-最小涨幅",
"持有个股",
]
]
big_df["统计次数"] = pd.to_numeric(big_df["统计次数"])
big_df["公告日后涨幅统计-10个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-平均涨幅"])
big_df["公告日后涨幅统计-10个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-最大涨幅"])
big_df["公告日后涨幅统计-10个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-最小涨幅"])
big_df["公告日后涨幅统计-30个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-平均涨幅"])
big_df["公告日后涨幅统计-30个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-最大涨幅"])
big_df["公告日后涨幅统计-30个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-最小涨幅"])
big_df["公告日后涨幅统计-60个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-平均涨幅"])
big_df["公告日后涨幅统计-60个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-最大涨幅"])
big_df["公告日后涨幅统计-60个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-最小涨幅"])
return big_df
def stock_gdfx_free_holding_change_em(date: str = "20210930") -> pd.DataFrame:
"""
东方财富网-数据中心-股东分析-股东持股变动统计-十大流通股东
https://data.eastmoney.com/gdfx/HoldingAnalyse.html
:param date: 报告期
:type date: str
:return: 十大流通股东
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "HOLDER_NUM,HOLDER_NEW",
"sortTypes": "-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_FREEHOLDERS_BASIC_INFONEW",
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"filter": f"(END_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"-",
"-",
"股东名称",
"-",
"股东类型",
"-",
"-",
"-",
"期末持股只数统计-总持有",
"期末持股只数统计-新进",
"期末持股只数统计-增加",
"期末持股只数统计-减少",
"期末持股只数统计-不变",
"-",
"流通市值统计",
"持有个股",
"-",
"-",
]
big_df = big_df[
[
"序号",
"股东名称",
"股东类型",
"期末持股只数统计-总持有",
"期末持股只数统计-新进",
"期末持股只数统计-增加",
"期末持股只数统计-不变",
"期末持股只数统计-减少",
"流通市值统计",
"持有个股",
]
]
big_df["期末持股只数统计-总持有"] = pd.to_numeric(big_df["期末持股只数统计-总持有"])
big_df["期末持股只数统计-新进"] = pd.to_numeric(big_df["期末持股只数统计-新进"])
big_df["期末持股只数统计-增加"] = pd.to_numeric(big_df["期末持股只数统计-增加"])
big_df["期末持股只数统计-不变"] = pd.to_numeric(big_df["期末持股只数统计-不变"])
big_df["期末持股只数统计-减少"] = pd.to_numeric(big_df["期末持股只数统计-减少"])
big_df["流通市值统计"] = pd.to_numeric(big_df["流通市值统计"])
return big_df
def stock_gdfx_holding_change_em(date: str = "20210930") -> pd.DataFrame:
"""
东方财富网-数据中心-股东分析-股东持股变动统计-十大股东
https://data.eastmoney.com/gdfx/HoldingAnalyse.html
:param date: 报告期
:type date: str
:return: 十大流通股东
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "HOLDER_NUM,HOLDER_NEW",
"sortTypes": "-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_HOLDERS_BASIC_INFO",
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"filter": f"(END_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 11:12:51 2020
@author: sahand
This is a preprocessing script for Cora dataset [McCallumIRJ]
@article{McCallumIRJ,
author = "<NAME> and <NAME> and <NAME> and <NAME>",
title = "Automating the Construction of Internet Portals with Machine Learning",
journal = "Information Retrieval Journal",
volume = 3,
pages = "127--163",
publisher = "Kluwer",
year = 2000,
note = "www.research.whizbang.com/data"
}
"""
# =============================================================================
# Init
# =============================================================================
dir_path = '/home/sahand/GoogleDrive/Data/Corpus/cora-classify/cora/clean/single_component_small/' # ryzen
# dir_path = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/Corpus/cora-classify/cora/' # c1314
import json
from os import listdir
from os.path import isfile, join
import pandas as pd
import numpy as np
import networkx as nx
from tqdm import tqdm
import gc
tqdm.pandas()
from sciosci.assets import text_assets
# =============================================================================
# read JSON and lists from Cora data
# =============================================================================
papers_list_raw = pd.read_csv(dir_path+'papers',sep='\t',names=['id','filename','citation string']) # contains duplicates
# papers_list_raw = papers_list_raw.groupby('id').first().reset_index()
papers_list_labeled = pd.read_csv(dir_path+'classifications',sep='\t',names=['filename','class'])
papers_list_labeled = papers_list_labeled[pd.notna(papers_list_labeled['class'])]
citations = pd.read_csv(dir_path+'citations',names=['referring_id','cited_id'],sep='\t')
# =============================================================================
# Prepare classes
# =============================================================================
def cleanup(arr):
try:
return np.array([x for x in arr if x!=''])
except:
print('\nGot',arr,', which is not a list. returning as-is.')
return np.array(arr)
labels = pd.DataFrame(list(papers_list_labeled['class'].str.split('/').progress_apply(lambda x: cleanup(x))))
labels.columns = ['class1','class2','class3']
papers_list_labeled = pd.concat([papers_list_labeled,labels],axis=1)
# Inspect classes
label_names = [str(x) for x in list(labels.groupby('class1').groups.keys())]
# =============================================================================
# Read text files
# =============================================================================
mypath = dir_path+'extractions'
files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
columns = ['filename','URL','Refering-URL','Root-URL','Email','Title','Author','Address','Affiliation','Note','Abstract','References-found']
papers_df = pd.DataFrame([],columns=columns)
log = []
for f_name in tqdm(files):
# f_name = 'http:##www.win.tue.nl#win#cs#fm#Dennis.Dams#Papers#dgg95a.ps.gz'
f = open(join(mypath,f_name), "r")
paper = [['filename',f_name]]
try:
tmp = f.read().split('\n')
except:
print('Failed to read file ',f_name,'\nLook at the final log for the list of such files.')
log.append(['reading failed',f_name])
continue
for line in tmp:
if line!='':
ar = line.split(': ', 1)
if len(ar)>1:
paper.append(ar)
paper_np = np.array(paper)
paper = pd.DataFrame(paper_np.T[1])
paper.index = paper_np.T[0]
paper = paper.T
paper = paper[paper.columns[paper.columns.isin(columns)]]
# papers_df = papers_df.append(paper)[papers_df.columns]
try:
papers_df = pd.concat([papers_df,paper])
except:
print('Something went wrong when concatenating the file',f_name,'\nLook at the final log for the list of such files.')
log.append(['concatenating failed',f_name])
papers_df.to_csv(dir_path+'extractions.csv',index=False)
log=pd.DataFrame(log,columns=['error','file'])
log.to_csv(dir_path+'extractions_log')
# =============================================================================
# Merge based on file name to get the idx
# =============================================================================
merged = pd.merge(papers_df, papers_list_raw, on='filename')
merged.to_csv(dir_path+'extractions_with_id.csv',index=False)
sample = merged.sample(5)
# =============================================================================
# Further pre-process to get unique abstracts
# =============================================================================
data = merged.copy()
data = pd.read_csv(dir_path+'extractions_with_id.csv')
# =============================================================================
# Merge based on file name to get the idx
# =============================================================================
merged = pd.merge(papers_list_labeled, data, on='filename')
merged.to_csv(dir_path+'extractions_with_unique_id_labeled.csv',index=False)
sample = merged.sample(5)
# =============================================================================
#
# =============================================================================
data = merged.copy()
data = pd.read_csv(dir_path+'extractions_with_id.csv')
data_clean = data[pd.notna(data['Abstract'])]
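# --- Illustrative note (not part of the original script) ------------------
# The boolean mask above keeps only rows with a non-null Abstract. The
# "unique abstracts" goal stated earlier presumably also needs a
# drop_duplicates step; the exact follow-up is not shown, so this is an
# assumption:
#
# data_clean = data[pd.notna(data['Abstract'])]
# data_clean = data_clean.drop_duplicates(subset='Abstract')
# --------------------------------------------------------------------------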
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.datasets import generate_ar_df
from etna.datasets import generate_const_df
from etna.datasets import generate_periodic_df
from etna.metrics import R2
from etna.models import LinearPerSegmentModel
from etna.transforms import FilterFeaturesTransform
from etna.transforms.encoders.categorical import LabelEncoderTransform
from etna.transforms.encoders.categorical import OneHotEncoderTransform
@pytest.fixture
def two_df_with_new_values():
d = {
"timestamp": list(pd.date_range(start="2021-01-01", end="2021-01-03"))
+ list(pd.date_range(start="2021-01-01", end="2021-01-03")),
"segment": ["segment_0", "segment_0", "segment_0", "segment_1", "segment_1", "segment_1"],
"regressor_0": [5, 8, 5, 9, 5, 9],
"target": [1, 2, 3, 4, 5, 6],
}
df1 = TSDataset.to_dataset(pd.DataFrame(d))
d = {
"timestamp": list(pd.date_range(start="2021-01-01", end="2021-01-03"))
+ list(pd.date_range(start="2021-01-01", end="2021-01-03")),
"segment": ["segment_0", "segment_0", "segment_0", "segment_1", "segment_1", "segment_1"],
"regressor_0": [5, 8, 9, 5, 0, 0],
"target": [1, 2, 3, 4, 5, 6],
}
df2 = TSDataset.to_dataset(pd.DataFrame(d))
return df1, df2
@pytest.fixture
def df_for_ohe_encoding():
df_to_forecast = generate_ar_df(10, start_time="2021-01-01", n_segments=1)
d = {
"timestamp":
|
pd.date_range(start="2021-01-01", end="2021-01-12")
|
pandas.date_range
|
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
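# Illustrative examples (not part of the original test module):
#   get_upcast_box(pd.Index, pd.Series([1]))        -> Series (Series outranks Index)
#   get_upcast_box(Series, DataFrame({'a': [1]}))   -> DataFrame (DataFrame outranks Series)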
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
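# strings and floats have no defined arithmetic with timedelta64 data, so
# every combination below is expected to raise TypeError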
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
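# adding a bare integer to timedelta64 data is ambiguous (no time unit);
# in this pandas version the Index box raises NullFrequencyError while the
# other boxes raise TypeError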
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
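# np.timedelta64("NaT") should behave like pd.NaT here: addition and
# subtraction in either order give an all-NaT timedelta result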
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box_df_fail):
# GH#18849
box = box_df_fail
box2 = Series if box in [pd.Index, tm.to_array] else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range('2013-01-01', '2013-01-03'))
enddate = Series(pd.date_range('2013-03-01', '2013-03-03'))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x
for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)  # api: pandas.TimedeltaIndex
import pytest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from cascade.dismod.constants import DensityEnum
from cascade.executor.execution_context import make_execution_context
from cascade.stats import meas_bounds_to_stdev
from cascade.input_data.emr import (
_emr_from_sex_and_node_specific_csmr_and_prevalence,
_make_interpolators,
_prepare_csmr,
_collapse_times,
_collapse_ages_unweighted,
_collapse_ages_weighted,
)
POINT_PREVALENCE = pd.DataFrame(
{
"age_lower": [0, 1, 10, 15, 20] * 2,
"age_upper": [0, 1, 10, 15, 20] * 2,
"time_lower": [1990] * 5 + [1995] * 5,
"time_upper": [1990] * 5 + [1995] * 5,
"sex_id": [3] * 5 * 2,
"node_id": [6] * 5 * 2,
"density": [DensityEnum.gaussian] * 5 * 2,
"weight": ["constant"] * 5 * 2,
"mean": [0.01, 0.02, 0.03, 0.04, 0.05] * 2,
"standard_error": [0.005, 0.004, 0.003, 0.002, 0.001] * 2,
"measure": ["prevalence"] * 5 * 2,
}
)
POINT_CSMR = pd.DataFrame(
{
"age": [0, 1, 10, 15, 20] * 2,
"age_lower": [0, 1, 10, 15, 20] * 2,
"age_upper": [0, 1, 10, 15, 20] * 2,
"time": [1990] * 5 + [1995] * 5,
"time_lower": [1990] * 5 + [1995] * 5,
"time_upper": [1990] * 5 + [1995] * 5,
"sex_id": [3] * 5 * 2,
"node_id": [6] * 5 * 2,
"mean": [0.006, 0.007, 0.008, 0.009, 0.01] * 2,
"standard_error": [0.0005, 0.0004, 0.0003, 0.0002, 0.0001] * 2,
}
)
SPAN_PREVALENCE = pd.DataFrame(
{
"age_lower": [0, 3, 20, 50, 70, 80],
"age_upper": [0.001, 4, 25, 55, 90, 80],
"time_lower": [1990, 1995, 2000, 2005, 2010, 2015],
"time_upper": [1995, 2000, 2005, 2010, 2015, 2020],
"sex_id": [3] * 6,
"node_id": [6] * 6,
"density": [DensityEnum.gaussian] * 6,
"weight": ["constant"] * 6,
"mean": [0.01, 0.02, 0.03, 0.04, 0.05, 0.06],
"standard_error": [0.005, 0.004, 0.003, 0.002, 0.001, 0.001],
"measure": ["prevalence"] * 6,
}
)
@pytest.fixture(scope="module")
def csmr_surface():
a = -0.01
b = -0.02
c = 1.0
csmr_mean = lambda age, time: -(-a * 0 - b * 1980 - c * 0.001 + a * age + b * time) / c
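# csmr_mean is linear in age and time, so it provides an exact analytic
# surface against which the interpolators fitted in the tests below can be checked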
csmr_rows = []
for age in range(0, 120):
for time in range(1980, 2040):
csmr_rows.append(
{
"age": age,
"time": time,
"sex_id": 3,
"node_id": 6,
"mean": csmr_mean(age, time),
"standard_error": 0.01,
}
)
return pd.DataFrame(csmr_rows), csmr_mean
def test_interpolators__points_only(csmr_surface):
csmr, source = csmr_surface
interps, _ = _make_interpolators(csmr)
for age in np.linspace(0, 100, 200):
for time in np.linspace(1990, 2018, 50):
assert np.isclose(interps["both"](age, time), source(age, time))
def test_interpolators__across_ages(csmr_surface):
csmr, source = csmr_surface
interps, stderr_interp = _make_interpolators(csmr)
for age in np.linspace(0, 100, 200):
mean = np.mean([source(age, time) for time in np.linspace(1980, 2040, 100)])
assert abs(interps["age"](age) - mean) < stderr_interp["age"](age) * 2
def test_interpolators__across_times(csmr_surface):
csmr, source = csmr_surface
interps, stderr_interp = _make_interpolators(csmr)
for time in np.linspace(1990, 2018, 50):
mean = np.mean([source(age, time) for age in np.linspace(0, 120, 200)])
assert abs(interps["time"](time) - mean) < stderr_interp["time"](time) * 2
def test_emr_from_sex_and_node_specific_csmr_and_prevalence__perfect_alignment__points_only():
emr = _emr_from_sex_and_node_specific_csmr_and_prevalence(POINT_CSMR, POINT_PREVALENCE)
emr = emr.set_index(["age_lower", "age_upper", "time_lower", "time_upper", "sex_id", "node_id"])
csmr = POINT_CSMR.set_index(["age_lower", "age_upper", "time_lower", "time_upper", "sex_id", "node_id"])
prevalence = POINT_PREVALENCE.set_index(["age_lower", "age_upper", "time_lower", "time_upper", "sex_id", "node_id"])
assert len(emr) == len(prevalence)
assert np.allclose(emr["mean"], csmr["mean"] / prevalence["mean"])
def test_emr_from_sex_and_node_specific_csmr_and_prevalence__perfect_alignment__spans(csmr_surface):
csmr, source = csmr_surface
emr = _emr_from_sex_and_node_specific_csmr_and_prevalence(csmr, SPAN_PREVALENCE)
assert len(emr) == len(SPAN_PREVALENCE)
for (_, pr), (_, er) in zip(SPAN_PREVALENCE.iterrows(), emr.iterrows()):
pmean = pr["mean"]
cmean = source((pr["age_lower"] + pr["age_upper"]) / 2, (pr["time_lower"] + pr["time_upper"]) / 2)
assert er["mean"] - cmean / pmean < er["standard_error"]
def test_collapse_times():
df = pd.DataFrame({"time_lower": [1990, 1990, 1990], "time_upper": [1990, 1995, 2000]})
df_new = _collapse_times(df)
assert all(df_new["time"] == [1990, 1992.5, 1995])
def test_collapse_ages_unweighted():
df = pd.DataFrame({"age_lower": [0, 1, 3, 53, 100.5, 1000000], "age_upper": [0.01, 2, 4.5, 57, 101, 1000005]})  # api: pandas.DataFrame
from datetime import datetime
import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series
import pandas.util.testing as tm
class TestSeriesAlterAxes:
def test_setindex(self, string_series):
# wrong type
msg = (
r"Index\(\.\.\.\) must be called with a collection of some"
r" kind, None was passed"
)
with pytest.raises(TypeError, match=msg):
string_series.index = None
# wrong length
msg = (
"Length mismatch: Expected axis has 30 elements, new"
" values have 29 elements"
)
with pytest.raises(ValueError, match=msg):
string_series.index = np.arange(len(string_series) - 1)
# works
string_series.index = np.arange(len(string_series))
assert isinstance(string_series.index, Index)
# Renaming
def test_rename(self, datetime_series):
ts = datetime_series
renamer = lambda x: x.strftime("%Y%m%d")
renamed = ts.rename(renamer)
assert renamed.index[0] == renamer(ts.index[0])
# dict
rename_dict = dict(zip(ts.index, renamed.index))
renamed2 = ts.rename(rename_dict)
tm.assert_series_equal(renamed, renamed2)
# partial dict
s = Series(np.arange(4), index=["a", "b", "c", "d"], dtype="int64")
renamed = s.rename({"b": "foo", "d": "bar"})
tm.assert_index_equal(renamed.index, Index(["a", "foo", "c", "bar"]))
# index with name
renamer = Series(
np.arange(4), index=Index(["a", "b", "c", "d"], name="name"), dtype="int64"
)
renamed = renamer.rename({})
assert renamed.index.name == renamer.index.name
def test_rename_by_series(self):
s = Series(range(5), name="foo")
renamer = Series({1: 10, 2: 20})
result = s.rename(renamer)
expected = Series(range(5), index=[0, 10, 20, 3, 4], name="foo")
tm.assert_series_equal(result, expected)
def test_rename_set_name(self):
s = Series(range(4), index=list("abcd"))
for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]:
result = s.rename(name)
assert result.name == name
tm.assert_numpy_array_equal(result.index.values, s.index.values)
assert s.name is None
def test_rename_set_name_inplace(self):
s = Series(range(3), index=list("abc"))
for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]:
s.rename(name, inplace=True)
assert s.name == name
exp = np.array(["a", "b", "c"], dtype=np.object_)
tm.assert_numpy_array_equal(s.index.values, exp)
def test_rename_axis_supported(self):
# Supporting axis for compatibility, detailed in GH-18589
s = Series(range(5))
s.rename({}, axis=0)
s.rename({}, axis="index")
with pytest.raises(ValueError, match="No axis named 5"):
s.rename({}, axis=5)
def test_set_name_attribute(self):
s = Series([1, 2, 3])
s2 = Series([1, 2, 3], name="bar")
for name in [7, 7.0, "name", datetime(2001, 1, 1), (1,), "\u05D0"]:
s.name = name
assert s.name == name
s2.name = name
assert s2.name == name
def test_set_name(self):
s = Series([1, 2, 3])
s2 = s._set_name("foo")
assert s2.name == "foo"
assert s.name is None
assert s is not s2
def test_rename_inplace(self, datetime_series):
renamer = lambda x: x.strftime("%Y%m%d")
expected = renamer(datetime_series.index[0])
datetime_series.rename(renamer, inplace=True)
assert datetime_series.index[0] == expected
def test_set_index_makes_timeseries(self):
idx = tm.makeDateIndex(10)
s = Series(range(10))
s.index = idx
assert s.index.is_all_dates
def test_reset_index(self):
df = tm.makeDataFrame()[:5]
ser = df.stack()
ser.index.names = ["hash", "category"]
ser.name = "value"
df = ser.reset_index()
assert "value" in df
df = ser.reset_index(name="value2")
assert "value2" in df
# check inplace
s = ser.reset_index(drop=True)
s2 = ser
s2.reset_index(drop=True, inplace=True)
tm.assert_series_equal(s, s2)
# level
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
)
s = Series(np.random.randn(6), index=index)
rs = s.reset_index(level=1)
assert len(rs.columns) == 2
rs = s.reset_index(level=[0, 2], drop=True)
tm.assert_index_equal(rs.index, Index(index.get_level_values(1)))
assert isinstance(rs, Series)
def test_reset_index_name(self):
s = Series([1, 2, 3], index=Index(range(3), name="x"))
assert s.reset_index().index.name is None
assert s.reset_index(drop=True).index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
for levels in ["A", "B"], [0, 1]:
# With MultiIndex
s = df.set_index(["A", "B"])["C"]
result = s.reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index("B"))
result = s.reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index("B"))
result = s.reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)
tm.assert_frame_equal(result, df[["C"]])
with pytest.raises(KeyError, match="Level E "):
s.reset_index(level=["A", "E"])
# With single-level Index
s = df.set_index("A")["B"]
result = s.reset_index(level=levels[0])
tm.assert_frame_equal(result, df[["A", "B"]])
result = s.reset_index(level=levels[:1])
tm.assert_frame_equal(result, df[["A", "B"]])
result = s.reset_index(level=levels[0], drop=True)
tm.assert_series_equal(result, df["B"])
with pytest.raises(IndexError, match="Too many levels"):
s.reset_index(level=[0, 1, 2])
# Check that .reset_index([],drop=True) doesn't fail
result = Series(range(4)).reset_index([], drop=True)
expected = Series(range(4))
tm.assert_series_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
s = Series(range(2), name="A", dtype="int64")
series_result = s.reset_index()
assert isinstance(series_result.index, RangeIndex)
series_expected = DataFrame(
[[0, 0], [1, 1]], columns=["index", "A"], index=RangeIndex(stop=2)
)
tm.assert_frame_equal(series_result, series_expected)
def test_reorder_levels(self):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
s = Series(np.arange(6), index=index)
# no change, position
result = s.reorder_levels([0, 1, 2])
tm.assert_series_equal(s, result)  # api: pandas.util.testing.assert_series_equal
from pandas import DataFrame
from trane.ops.aggregation_ops import * # noqa
from trane.utils.table_meta import TableMeta as TM
df = DataFrame({'col': [1, 2, 3, 4, 5]})  # api: pandas.DataFrame
from __future__ import absolute_import
import numpy as np
import pandas as pd
qty=10000
gauss = {'oneA': np.random.randn(qty),
'oneB': np.random.randn(qty),
'cats': np.random.randint(0,5,size=qty),
'hundredA': np.random.randn(qty)*100,
'hundredB': np.random.randn(qty)*100}
gauss = pd.DataFrame(gauss)  # api: pandas.DataFrame
import os, json, requests
import pandas as pd
import numpy as np
import jieba
import keras
from sklearn.preprocessing import LabelEncoder
from keras import backend as K
from flask import Flask, request
app = Flask(__name__)
tf_serving_api = "http://127.0.0.1:8501/v1/models/VarietyPredictionZh:predict"
base_dir="/Users/wxf/Documents/GitHub/FL-WINE-PROJECT/"
jieba_dict_file = base_dir + "dataset/jieba-dict/dict.txt"
jieba.load_userdict(jieba_dict_file)
data = pd.read_csv(base_dir + "dataset/wine-review/winemag-data-130k-v2-zh-resampled.csv")  # api: pandas.read_csv
# Note that this is work in progress and some hard-coded values were used for initial examples only and will be
# removed (and generalized) in the future
from mastml.legos import feature_generators
import pandas as pd
import numpy as np
#from sklearn.externals import joblib
import joblib
import os
def get_input_columns(training_data_path, exclude_columns):
# Load in training data and get input columns
#try:
# df_train = pd.read_csv(training_data_path)
#except:
# df_train = pd.read_excel(training_data_path)
df_train = training_data_path
input_columns = [col for col in df_train.columns.tolist() if col not in exclude_columns]
return input_columns
def featurize_mastml(prediction_data, scaler_path, training_data_path, exclude_columns):
'''
prediction_data: a composition string, list of composition strings, or path to a .csv/.xlsx file of compositions to predict
scaler_path (str): file path to the saved sklearn normalizer (e.g. a StandardScaler object) fit to the training data
training_data_path: training data set (or dataframe) used in the original model fit; the composition column is assumed to be named 'composition'
exclude_columns (list): columns in the training data that are not model input features
'''
# Write featurizer that takes chemical formula of test materials (from file), constructs correct feature vector then reports predictions
# TODO: make this variable input
COMPOSITION_COLUMN_NAME = 'composition'
if type(prediction_data) is str:
if '.xlsx' in prediction_data:
df_new = pd.read_excel(prediction_data, header=0)
compositions = df_new[COMPOSITION_COLUMN_NAME].tolist()
elif '.csv' in prediction_data:
df_new = pd.read_csv(prediction_data, header=0)
compositions = df_new[COMPOSITION_COLUMN_NAME].tolist()
else:
compositions = [prediction_data]
df_new = pd.DataFrame().from_dict(data={COMPOSITION_COLUMN_NAME: compositions})
elif type(prediction_data) is list:
compositions = prediction_data
df_new = pd.DataFrame().from_dict(data={COMPOSITION_COLUMN_NAME: compositions})
else:
raise TypeError('prediction_data must be a composition in the form of a string, list of strings, or .csv or .xlsx file path')
# Also get the training data so can build MAGPIE list and see which are constant features
#if type(training_data_path) is str:
# if '.xlsx' in training_data_path:
# df_train = pd.read_excel(training_data_path, header=0)
# elif '.csv' in training_data_path:
# df_train = pd.read_csv(training_data_path, header=0)
#else:
df_train = training_data_path
# Generate and use magpie featurizer using mastml
magpie = feature_generators.Magpie(composition_feature=COMPOSITION_COLUMN_NAME,
feature_types=['composition_avg', 'arithmetic_avg', 'max', 'min', 'difference'])
magpie.fit(df_new)
df_new_featurized = magpie.transform(df_new)
# df_train may have other columns with it. Just take the composition column to make features ???
df_train = pd.DataFrame(df_train[COMPOSITION_COLUMN_NAME])  # api: pandas.DataFrame
import pandas as pd
import matplotlib.pyplot as plt # Matlab-style plotting
import seaborn as sns
color = sns.color_palette()
import warnings
warnings.filterwarnings('ignore')
order_products_prior_df = pd.read_csv("./input/order_products_prior.csv")
order_products_train_df = pd.read_csv("./input/order_products_train.csv")  # api: pandas.read_csv
#!/usr/bin/env python3
"""<NAME>, Programming Assignment 3, preprocessing.py
This module provides the Preprocessor class.
"""
# Standard library imports
from pathlib import Path
import collections as c
import typing as t
# Third party libraries
import numpy as np
import pandas as pd
# Local imports
from p4.preprocessing.jenks import compute_two_break_jenks
from p4.preprocessing.split import make_splits
class Preprocessor:
def __init__(self, dataset_name: str, dataset_meta: dict, data_dir: Path):
self.dataset_name = dataset_name
self.dataset_meta = dataset_meta
self.data_dir = Path(data_dir)
self.dataset_src: Path = self.data_dir / dataset_meta["data_filename"]
self.names_meta = pd.DataFrame(self.dataset_meta["names_meta"]).set_index("name")
self.names = list(self.names_meta.index.values)
self.imputed_data: t.Union[pd.DataFrame, None] = None
self.numeric_columns: t.Union[list, None] = None
self.jenks_breaks: dict = {}
self.discretize_dict = c.defaultdict(lambda: {})
self.data_classes: t.Union[c.OrderedDict, None] = None
self.drops = ["sample_code_number", "model_name", "vendor_name", "erp"]
def __repr__(self):
return f"{self.dataset_name} Loader"
def compute_natural_breaks(self, numeric_cols: list = None, n_breaks=2, exclude_ordinal=True) -> pd.DataFrame:
"""
Compute two-class natural Jenks breaks for each numeric column.
:param numeric_cols: List of numeric columns to compute breaks for
:param n_breaks: Number of breaks to split list into
:param exclude_ordinal: True to exclude ordinal columns
:return: Dataframe of indexed break assignments
"""
if n_breaks != 2:
msg = "Jenks breaks are only available for two classes / breaks."
raise NotImplementedError(msg)
# Select all numeric columns if none are provided
numeric_cols = self.get_numeric_columns() if numeric_cols is None else numeric_cols
# If indicated, remove ordinal columns
if exclude_ordinal:
ordinal_cols = self.names_meta[self.names_meta.data_class == "ordinal"].index
numeric_cols = [x for x in numeric_cols if x not in ordinal_cols]
for numeric_col in numeric_cols:
values = self.data[numeric_col].tolist()
self.jenks_breaks[numeric_col] = compute_two_break_jenks(values)
self.jenks_breaks = pd.DataFrame.from_dict(self.jenks_breaks).transpose()
self.jenks_breaks.sort_values(by="gcvf", ascending=False, inplace=True)
return self.jenks_breaks
def discretize(self, discretize_dict: dict) -> pd.DataFrame:
"""
Discretize indicated columns using provided discretize_dict.
:param discretize_dict: Dictionary keyed by column
:return: Discretized columns
Example discretize_dict structure:
{"bare_nuclei": {"n_bins": 2, "binning": "equal_width"},
"normal_nucleoli": {"n_bins": 2, "binning": "equal_width"}}
"""
self.discretize_dict = c.defaultdict(lambda: {}, discretize_dict)
for col, bin_dict in self.discretize_dict.items():
frame, retbins = self._discretize(self.data[col], bin_dict["n_bins"], bin_dict["binning"])
self.data.drop(axis=1, labels=col, inplace=True)
self.data = self.data.join(frame)
self.discretize_dict[col]["retbins"] = retbins
return self.data[list(discretize_dict.keys())]
def drop(self, labels: t.Union[list, None] = None) -> pd.DataFrame:
"""
Drop selected columns.
:param labels: Columns to drop
:return: Updated dataset
"""
if labels is None:
labels = self.drops
self.data.drop(axis=1, labels=labels, inplace=True, errors="ignore")
return self.data
def dummy(self, columns: t.Union[list[str], str, None] = "default") -> pd.DataFrame:
"""
Dummy categorical columns.
:param columns: 'default' for defaults, list to specify them, False / None to do nothing
:return: Data
"""
if columns == "default":
mask = self.names_meta["data_class"] == "categorical"
mask = mask & self.names_meta["feature"]
columns = self.names_meta[mask].index.values.tolist()
if columns:
self.data = pd.get_dummies(self.data, columns=columns)  # api: pandas.get_dummies
# dependencies
import os
import json
import numpy as np
import pandas as pd
import re
import time
import nltk
import re
import string
#from tensorlayer import * as tl
import tensorlayer as tl
#import .utils
import imageio
cwd = "D:/My_Stuff/JHU_Stuff/625.742/coco/annotations/"
caption_dir = cwd #os.path.join(, 'annotations_trainval2014/annotations/')
img_dir = "D:/My_Stuff/JHU_Stuff/625.742/coco/annotations/train2014/train2014/"
VOC_FIR = cwd + '/vocab.txt'
with open(caption_dir + 'instances_train2014.json') as json_data:
inst = json.load(json_data)
# annotations
anns = pd.DataFrame.from_dict(inst['annotations'])
# categories
cats = pd.DataFrame.from_dict(inst['categories'])
with open(caption_dir + 'captions_train2014.json') as json_data:
caps = json.load(json_data)
imagerefs = pd.DataFrame.from_dict(caps['images'])
captions = pd.DataFrame.from_dict(caps['annotations'])  # api: pandas.DataFrame.from_dict
import doctest
import os
from unittest import TestCase
import pandas as pd
import xarray as xr
from pysd.tools.benchmarking import assert_frames_close
_root = os.path.dirname(__file__)
class TestUtils(TestCase):
def test_xrsplit(self):
import pysd
array1d = xr.DataArray([0.5, 0., 1.],
{'ABC': ['A', 'B', 'C']},
['ABC'])
array2d = xr.DataArray([[0.5, -1.5],
[-1., -0.5],
[-0.75, 0.]],
{'ABC': ['A', 'B', 'C'],
'XY': ['X', 'Y']},
['ABC', 'XY'])
array3d = xr.DataArray([[[0.5, 4.], [-1.5, 3.]],
[[-1., 2.], [-0.5, 5.5]],
[[-0.75, 0.75], [0., -1.]]],
{'ABC': ['A', 'B', 'C'],
'XY': ['X', 'Y'],
'FG': ['F', 'G']},
['ABC', 'XY', 'FG'])
s1d = pysd.utils.xrsplit(array1d)
s2d = pysd.utils.xrsplit(array2d)
s3d = pysd.utils.xrsplit(array3d)
# check length
self.assertEqual(len(s1d), 3)
self.assertEqual(len(s2d), 6)
self.assertEqual(len(s3d), 12)
# check all values for 1d
self.assertIn(xr.DataArray(0.5, {'ABC': ['A']}, ['ABC']), s1d)
self.assertIn(xr.DataArray(0., {'ABC': ['B']}, ['ABC']), s1d)
self.assertIn(xr.DataArray(1., {'ABC': ['C']}, ['ABC']), s1d)
# check some values for 2d and 3d
self.assertIn(xr.DataArray(0.5,
{'ABC': ['A'], 'XY': ['X']},
['ABC', 'XY']),
s2d)
self.assertIn(xr.DataArray(-0.5,
{'ABC': ['B'], 'XY': ['Y']},
['ABC', 'XY']),
s2d)
self.assertIn(xr.DataArray(-0.5,
{'ABC': ['B'], 'XY': ['Y'], 'FG': ['F']},
['ABC', 'XY', 'FG']),
s3d)
self.assertIn(xr.DataArray(0.75,
{'ABC': ['C'], 'XY': ['X'], 'FG': ['G']},
['ABC', 'XY', 'FG']),
s3d)
def test_get_return_elements_subscripts(self):
import pysd
self.assertEqual(
pysd.utils.get_return_elements(
["Inflow A[Entry 1,Column 1]",
"Inflow A[Entry 1,Column 2]"],
{'Inflow A': 'inflow_a'}),
(['inflow_a'],
{'Inflow A[Entry 1,Column 1]': ('inflow_a',
('Entry 1', 'Column 1')),
'Inflow A[Entry 1,Column 2]': ('inflow_a',
('Entry 1', 'Column 2'))}
)
)
def test_get_return_elements_realnames(self):
import pysd
self.assertEqual(
pysd.utils.get_return_elements(
["Inflow A", "Inflow B"],
{'Inflow A': 'inflow_a', 'Inflow B': 'inflow_b'}),
(['inflow_a', 'inflow_b'],
{'Inflow A': ('inflow_a', None),
'Inflow B': ('inflow_b', None)}
)
)
def test_get_return_elements_pysafe_names(self):
import pysd
self.assertEqual(
pysd.utils.get_return_elements(
["inflow_a", "inflow_b"],
{'Inflow A': 'inflow_a', 'Inflow B': 'inflow_b'}),
(['inflow_a', 'inflow_b'],
{'inflow_a': ('inflow_a', None),
'inflow_b': ('inflow_b', None)}
)
)
def test_get_return_elements_not_found_error(self):
""""
Test for not found element
"""
import pysd
with self.assertRaises(KeyError):
pysd.utils.get_return_elements(
["inflow_a", "inflow_b", "inflow_c"],
{'Inflow A': 'inflow_a', 'Inflow B': 'inflow_b'})
def test_make_flat_df(self):
import pysd
df = pd.DataFrame(index=[1], columns=['elem1'])
df.at[1] = [xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2'])]
expected = pd.DataFrame(index=[1], data={'Elem1[B,F]': 6.})
return_addresses = {
'Elem1[B,F]': ('elem1', {'Dim1': ['B'], 'Dim2': ['F']})}
actual = pysd.utils.make_flat_df(df, return_addresses)
# check all columns are in the DataFrame
self.assertEqual(set(actual.columns), set(expected.columns))
assert_frames_close(actual, expected, rtol=1e-8, atol=1e-8)
def test_make_flat_df_nosubs(self):
import pysd
df = pd.DataFrame(index=[1], columns=['elem1', 'elem2'])
df.at[1] = [25, 13]
expected = pd.DataFrame(index=[1], columns=['Elem1', 'Elem2'])
expected.at[1] = [25, 13]
return_addresses = {'Elem1': ('elem1', {}),
'Elem2': ('elem2', {})}
actual = pysd.utils.make_flat_df(df, return_addresses)
# check all columns are in the DataFrame
self.assertEqual(set(actual.columns), set(expected.columns))
self.assertTrue(all(actual['Elem1'] == expected['Elem1']))
self.assertTrue(all(actual['Elem2'] == expected['Elem2']))
def test_make_flat_df_return_array(self):
""" There could be cases where we want to
return a whole section of an array - ie, by passing in only part of
the simulation dictionary. in this case, we can't force to float..."""
import pysd
df = pd.DataFrame(index=[1], columns=['elem1', 'elem2'])
df.at[1] = [xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2']),
xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2'])]
expected = pd.DataFrame(index=[1], columns=['Elem1[A, Dim2]', 'Elem2'])  # api: pandas.DataFrame
#!/usr/bin/env python
"""
Plot ARMA Results for EPRI Cases.
This script requires cryptography module to be installed.
You can install this via:
conda install cryptography scikit-learn
or
pip install cryptography scikit-learn
"""
# Internal Libraries
import argparse
import time
import hashlib
import collections
import datetime as dt
import xml.etree.cElementTree as ET
from pathlib import Path
from typing import Iterator, Dict, List, Tuple, Union, Callable
from typing_extensions import TypedDict
# External Libraries
import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.use('Agg') # Prevents the script from blocking while plotting
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors # type: ignore
import sklearn.linear_model
import statsmodels.api as sm
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import StrMethodFormatter # type:ignore
from cryptography.fernet import Fernet
# Global Constants
YEARS = np.arange(2025, 2055, 5)
DAYS = ['Sun.', 'Mon.', 'Tues.', 'Wed.', 'Thurs.', 'Fri.', 'Sat.']
MONTHS = [
'Jan.', 'Feb.', 'Mar.', 'Apr.', 'May', 'June',
'July', 'Aug.', 'Sept.', 'Oct.', 'Nov.', 'Dec.'
]
CRYPTO = b"_1sbrS8rGU1kSSCLIuN6e2Z_Gs4UhJX8uCVPETuJkOs="
BASE_DIR = Path(__file__).resolve().parent.parent
TRAIN_DIR = BASE_DIR.joinpath('train')
# Initialize Color-Map
set1 = np.unique(plt.cm.Dark2(np.linspace(0, 1, 256)), axis=0) # type: ignore
set2 = [plt.cm.viridis(i) for i in [0.0, 0.25, 0.5, 0.75, 1.0]] # type: ignore
set3 = np.unique(plt.cm.tab20(np.linspace(0, 1, 256)), axis=0) # type: ignore
allc = np.vstack((set1, set2, set3))
COLORS = mcolors.LinearSegmentedColormap.from_list('rom_colormap', allc)
NORM = mcolors.BoundaryNorm(np.arange(0, 32, 1), COLORS.N, clip=True)
# Matplotlib Global Settings
plt.rc("figure", figsize=(25, 10))
plt.rc(
"axes",
titlesize=25,
titleweight="bold",
labelsize=25,
axisbelow=True,
grid=True
)
plt.rc("savefig", bbox="tight")
plt.rc("legend", fontsize=20)
plt.rc(["xtick", "ytick"], labelsize=20)
class Info(TypedDict):
"""Heterogenously typed dictionary for stricter type checking."""
info: Dict[str, str]
xmlhash: str
args: argparse.Namespace
fetch: Callable
class PlotOpts(TypedDict):
"""Heterogenously typed dictionary for stricter type checking."""
density: bool
bins: int
edgecolor: str
## Functions
def search_node(root: ET.Element, node: str, child_element_names: List[str]) -> Dict[str, Union[str, None]]:
"""
Return dictionary containing information requested from node children.
@In: root, ET.Element, root node of xml tree.
@In: node, str, xpath to parent node of interest.
@In: children, List[str], expected children nodes.
@Out: values, Dict[str, str], retrieved information for subnode.
"""
values = {
# This information will be placed in LaTeX table;
# Therefore, we need to preemptively escape underscores.
child.replace("_", r"\_"): root.findtext(f"{node}/{child}")
for child in child_element_names
if root.findtext(f"{node}/{child}") is not None
}
return values
def parse_xml(xml_file: Path) -> Dict[str, str]:
"""
Parse model information from xml file.
@In: xml_file, Path, path to current specified xml_file.
@Out: info_dict, Dict[str, str], information parsed from xml.
"""
root = ET.parse(xml_file).getroot()
now = dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# Information parsed from xml file.
case_info = {
"state": xml_file.name.split("_")[0].upper(),
"strategy": xml_file.resolve().parent.name,
}
model_info = search_node(root, "Models/ROM", ["P", "Q", "Fourier"])
if root.find("Models/ROM/Segment/subspace") is not None:
subspace_info = root.find("Models/ROM/Segment/subspace").attrib
else:
subspace_info: Dict[str, str] = {}
model_info = {**model_info, **subspace_info}
pp_info = search_node(
root,
"Models/PostProcessor/KDD",
["SKLtype", "n_clusters", "tol", "random_state"],
)
samp_info = search_node(root, "Samplers/MonteCarlo/samplerInit", ["limit"])
misc_info = {"created": now}
working_dir_info = search_node(root, "RunInfo", ['WorkingDir'])
# Merge all dictionaries
# This should allow us to not fail on missing nodes
info_dict = {
**case_info, **model_info, **pp_info,
**samp_info, **misc_info, **working_dir_info
}
return info_dict
def parse_meta(xml_file: Path) -> Callable[[int, int], int]:
"""
Return callable that returns cluster info for each ROM for each year.
@In: xml_file, Path, path to romMeta.xml
@Out: get_cluster, Callable, a function that takes a segment and
returns a cluster.
"""
root = ET.parse(xml_file).getroot()
meta = {}
for year_rom in root.findall("arma/MacroStepROM"):
meta[int(year_rom.get('YEAR', '0'))] = {}
for cluster in year_rom.findall("ClusterROM"):
cluster_num = int(cluster.get('cluster', '999'))
segments = tuple([int(i) for i in cluster.findtext('segments_represented').split(", ")])
# Assign tuple of segments as key and Cluster Number as value
meta[int(year_rom.get('YEAR', '0'))][segments] = cluster_num
def get_cluster(year: int, segment: int) -> int:
"""
Return cluster number for a given segment.
@In: year, int, year given from the ROM.
@In: segment, int, segment from predicted values of ROM.
@Out: int, a number representing cluster number.
"""
id_segments = next(filter(lambda x: segment in x, meta[year].keys()))
return meta[year][id_segments]
return get_cluster
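# Hedged usage sketch (the file name is an assumption; parse_meta expects the romMeta.xml
# written by RAVEN):
#   get_cluster = parse_meta(Path("romMeta.xml"))
#   get_cluster(2025, 3)  # -> the cluster number that segment 3 belongs to in year 2025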
def hash_xml(xml_file: Path) -> str:
"""
Return a truncated sha1 hash of xml.
@In: xml_file, Path, current specified xml_file.
@Out: sha_signature, str, sha1 hash of xml_file.
"""
hash_str = open(xml_file, "rb").read()
sha_signature = hashlib.sha1(hash_str).hexdigest()[:8]
return sha_signature
def encrypt_xml(xml_file: Path) -> bytes:
"""
Encrypt XML document that created the plots.
@In: xml_file, Path, current specified xml_file.
    @Out: xml_hash, bytes, encrypted xml content to embed in the PNG.
"""
cipher = Fernet(CRYPTO)
with open(xml_file, "rb") as out:
xml_hash = cipher.encrypt(out.read())
return xml_hash
def decrypt_xml(png_file: Path) -> str:
"""
Decrypt XML file that is hidden in png file.
@In: png_file, Path, PNG file with embedded xml.
@Out: xml_text, str, original xml file.
"""
cipher = Fernet(CRYPTO)
with open(png_file, "rb") as f:
xml_hash = f.read().split(b"IEND\xaeB`\x82")[1]
xml_text = cipher.decrypt(xml_hash).decode("utf-8")
return xml_text
def embed_xml(xml_file: Path, old_image: Path, new_image: Path) -> None:
"""
Embed XML file into an image.
    @In: xml_file, Path, path to the xml you want to embed.
@In: old_image, Path, path to the original png file without xml.
@In: new_image, Path, path to the new image containing xml.
@Out: None
"""
with open(new_image, "wb") as out:
out.write(open(old_image, "rb").read())
out.write(encrypt_xml(xml_file))
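# Hedged round-trip sketch (file names are assumptions; CRYPTO must hold a valid Fernet key):
#   embed_xml(Path("case.xml"), Path("plot.png"), Path("plot_with_xml.png"))
#   decrypt_xml(Path("plot_with_xml.png"))  # -> the original xml text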
def ecdf(sample) -> Tuple[float, float]:
"""
Return empirical cdf of sample data.
@In: sample, pd.Series, a vector of observed data.
@Out: Tuple[float, float], quantile and cumulative probability vectors.
"""
sample = np.atleast_1d(sample).astype(np.double) # type: ignore
quantiles, counts = np.unique(sample, return_counts=True)
cumprob = np.cumsum(counts).astype(np.double) / sample.size # type: ignore
return quantiles, cumprob # type: ignore
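# Hedged sketch (not part of the original script): a tiny numeric check of ecdf.
def _demo_ecdf() -> None:
    quantiles, cumprob = ecdf(np.array([3.0, 1.0, 3.0, 2.0]))
    # unique values [1, 2, 3] with cumulative probabilities [0.25, 0.5, 1.0]
    assert np.allclose(quantiles, [1.0, 2.0, 3.0]) and np.allclose(cumprob, [0.25, 0.5, 1.0])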
def detrend(pivots, values, periods: List[float]) -> pd.Series:
"""
    Return the fitted Fourier trend of a time-series signal.
    @In: pivots, np.ndarray, time points (hours) of the signal.
    @In: values, np.ndarray, observed signal values at each pivot.
    @In: periods, List[float], Fourier base periods to fit.
    @Out: np.ndarray, the Fourier series evaluated at each pivot.
"""
# TODO: scrape fourier bases from current xml file.
fourier = np.zeros((pivots.size, 2*len(periods)))
for p, period in enumerate(periods):
hist = 2.0 * (np.pi / period) * pivots
fourier[:, 2 * p] = np.sin(hist)
fourier[:, 2 * p + 1] = np.cos(hist)
masks = np.ones(len(values), dtype=bool) # type: ignore
    # normalize=False was the default and the keyword has been removed in newer scikit-learn.
    fe = sklearn.linear_model.LinearRegression()
fs = fourier[masks, :]
values = values[masks]
fe.fit(fs, values)
intercept = fe.intercept_
coeffs = fe.coef_
wave_coef_map: Dict = collections.defaultdict(dict)
for c, coef in enumerate(coeffs):
period = periods[c//2]
waveform = 'sin' if c % 2 == 0 else 'cos'
wave_coef_map[period][waveform] = coef
coef_map = {}
signal = np.ones(len(pivots)) * intercept
for period, coefs in wave_coef_map.items():
A = coefs['sin']
B = coefs['cos']
s, C = ((np.arctan2(B, A)), A / np.cos(np.arctan2(B, A))) # type: ignore
coef_map[period] = {'amplitude': C, 'phase': s}
signal += C * np.sin(2.0 * np.pi * pivots / period + s)
return signal # type: ignore
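# Hedged sketch (not part of the original script): on a pure 24-hour sine plus offset, the
# fitted Fourier trend should reproduce the signal almost exactly.
def _demo_detrend() -> None:
    hours = np.arange(48.0)
    load = 10.0 + 2.0 * np.sin(2.0 * np.pi * hours / 24.0)
    trend = detrend(hours, load, periods=[24.0])
    assert np.allclose(trend, load, atol=1e-6)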
def add_plot_table(fig: plt.Figure, axes: List[plt.Axes], info: Dict[str, str]) -> None:
"""
Add table of ROM Parameters to graphic.
@In: fig, plt.Figure, current figure to contain plots
@In: axes, list, a list of current axes in figure
@In: info, dict, a dictionary of model information.
@Out: None
"""
gs = fig.add_gridspec(nrows=2, ncols=(len(YEARS) // 2 + 1))
nested_axes = [axes[i:i+3] for i in range(0, 6, 3)]
for i, row in enumerate(nested_axes):
for j, ax in enumerate(row):
ax.set_position(gs[i, j].get_position(fig))
ax.set_subplotspec(gs[i, j])
fig.tight_layout()
axtab = fig.add_subplot(gs[0:, -1])
table_vals = [[val] for _, val in info.items()]
tab = axtab.table(
table_vals,
colLabels=[r"$\bf{ROM \ Parameters}$"],
rowLabels=[fr"$\bf{k}$" for k in info.keys()],
bbox=[0.3, 0.2, 0.72, 0.7],
)
axtab.set_axis_off()
tab.auto_set_font_size(False)
tab.set_fontsize(15)
tab.scale(.7, 4)
    # Prevent a long Fourier vector from overlapping the table.
if 22 < len(info['Fourier']) < 27:
tab.get_celld()[(5, 0)].set_text_props(fontproperties=FontProperties(size=10))
elif len(info['Fourier']) >= 27:
fourier_ele = info['Fourier'].split(', ')
fourier_len = np.array([len(ele) for ele in fourier_ele])
which_max = np.argmin(fourier_len.cumsum() < 26)
fourier_new = ', '.join(fourier_ele[:which_max]) + '...'
tab.get_celld()[(5, 0)].set_text_props(
fontproperties=FontProperties(size=10),
text=fourier_new
)
def plot_time(ax: plt.Axes,
year_rom: pd.DataFrame,
epri_dat: pd.DataFrame,
**kwargs) -> None:
"""
Plot time series comparison of original and sampled ROM.
@In: ax, plt.Axes, current axes to plot data with.
@In: year_rom, pd.DataFrame, ARMA output for the specified year
@In: epri_dat, pd.DataFrame, Original EPRI data for specified year
@Out: None
"""
for _, dat in year_rom.groupby("RAVEN_sample_ID"):
ax.plot(
dat.HOUR.to_numpy(), dat.TOTALLOAD.to_numpy(),
alpha=0.1, color="blue", label="Synthetic"
)
ax.plot(
epri_dat.HOUR.to_numpy(), epri_dat.TOTALLOAD.to_numpy(),
color="darkred", label="US-REGEN"
)
h, l = ax.get_legend_handles_labels()
by_label = dict(zip(l, h))
leg = ax.legend(by_label.values(), by_label.keys())
for lh in leg.legendHandles:
lh.set_alpha(1)
ax.yaxis.set_major_formatter(StrMethodFormatter("{x:,.0f}"))
# ax.set_xlabel("Period (Hour)")
# ax.set_ylabel("Total Load (GW)")
def plot_hist(ax: plt.Axes,
year_rom: pd.DataFrame,
epri_dat: pd.DataFrame,
**kwargs) -> None:
"""
    Plot histogram comparison of original and sampled ROM.
@In: ax, plt.Axes, current axes to plot data with.
@In: year_rom, pd.DataFrame, ARMA output for the specified year
@In: epri_dat, pd.DataFrame, Original EPRI data for specified year
@Out: None
"""
plot_opts: PlotOpts = {"density": True, "bins": 50, "edgecolor": "black",}
ax.hist(
year_rom.TOTALLOAD.to_numpy(),
color="blue", label="Synthetic", **plot_opts,
)
ax.hist(
epri_dat.TOTALLOAD.to_numpy(),
color="darkred", alpha=0.7, label="US-REGEN", **plot_opts,
)
ax.legend()
def plot_ecdf(ax: plt.Axes,
year_rom: pd.DataFrame,
epri_dat: pd.DataFrame,
**kwargs) -> None:
"""
Plot Load Duration Curves.
@In: ax, plt.Axes, current axes to plot data with.
@In: year_rom, pd.DataFrame, ARMA output for the specified year
@In: epri_dat, pd.DataFrame, Original EPRI data for specified year
@Out: None
"""
epri_q, epri_p = ecdf(epri_dat.TOTALLOAD.to_numpy())
custom_lines = [
mpl.lines.Line2D([0], [0], color='blue', lw=4), # type: ignore
mpl.lines.Line2D([0], [0], color='darkred', lw=4) # type: ignore
]
for _, dat in year_rom.groupby("RAVEN_sample_ID"):
sim_q, sim_p = ecdf(dat.TOTALLOAD.to_numpy())
ax.plot(sim_p, sim_q, linestyle='-', color="blue", alpha=0.3, lw=1.5) # type: ignore
ax.plot(epri_p, epri_q, linestyle='-', color='darkred', lw=1.5) # type: ignore
ax.legend(custom_lines, ['Synthetic', 'US-REGEN'])
def plot_orig(ax: plt.Axes, _: pd.DataFrame, epri_dat: pd.DataFrame, **kwargs) -> None:
"""
Plot Fourier decomposition on top of original data.
@In: ax, plt.Axes, current axes to plot data with
@In: _, pd.DataFrame, ARMA output unneeded for this plot
@In: epri_dat, pd.DataFrame, Original EPRI data for specified year
@Out: None
"""
bases = [float(i) for i in kwargs['Fourier'].split(', ')]
pivots = epri_dat.HOUR.to_numpy()
values = epri_dat.TOTALLOAD.to_numpy()
fourier = detrend(pivots, values, bases)
ax.plot(pivots, values, color="darkred", label='US-REGEN')
ax.plot(pivots, fourier, color="darkblue", label='Fourier Series') # type: ignore
ax.legend()
def plot_clust(ax: plt.Axes,
year_rom: pd.DataFrame,
_: pd.DataFrame,
**kwargs) -> None:
"""
Plot time series comparison of original and sampled ROM with
highlighted cluster information.
@In: ax, plt.Axes, current axes to plot data with.
@In: year_rom, pd.DataFrame, ARMA output for the specified year
    @In: _, pd.DataFrame, Original EPRI data, unneeded for this plot
@Out: None
"""
# Let's only plot the clusters of one sample.
year_rom = year_rom.query("RAVEN_sample_ID == 0").iloc[:-1, :]
for _, d, in year_rom.groupby("SEGMENT"):
clust = np.unique(d.CLUSTER)[0]
ax.plot(d.HOUR.to_numpy(), d.TOTALLOAD.to_numpy(), color=allc[clust]) # type: ignore
ax.yaxis.set_major_formatter(StrMethodFormatter("{x:,.0f}"))
def date_heatmap(series, ax, **kwargs):
"""
    Create a calendar heatmap of daily values.
    @In: series, pd.Series, datetime-indexed values to plot (averaged per day).
    @In: ax, plt.Axes, current axes to plot data with.
    @In: kwargs, dict, extra options passed to pcolormesh.
    @Out: ax, plt.Axes, the axes containing the heatmap.
"""
# Combine values occurring on the same day.
dates = series.index.floor('D')
group = series.groupby(dates)
series = group.mean()
# Parse start/end, defaulting to the min/max of the index.
start = pd.to_datetime(series.index.min()) # type: ignore
end = pd.to_datetime(series.index.max()) # type: ignore
# We use [start, end) as a half-open interval below.
end += np.timedelta64(1, 'D') # type: ignore
# Get the previous/following Sunday to start/end.
# Pandas and numpy day-of-week conventions are Monday=0 and Sunday=6.
start_sun = start - np.timedelta64((start.dayofweek + 1) % 7, 'D') # type: ignore
end_sun = end + np.timedelta64(7 - end.dayofweek - 1, 'D') # type: ignore
# Create the heatmap and track ticks.
num_weeks = (end_sun - start_sun).days // 7
heatmap = np.nan * np.zeros((7, num_weeks))
ticks = {} # week number -> month name
for week in range(num_weeks):
for day in range(7):
date = start_sun + np.timedelta64(7 * week + day, 'D') # type: ignore
if date.day == 1:
ticks[week] = MONTHS[date.month - 1]
if date.dayofyear == 1:
ticks[week] += f'\n{date.year}'
if start <= date < end:
heatmap[day, week] = series.get(date, np.nan)
# Get the coordinates, offset by 0.5 to align the ticks.
y = np.arange(8) - 0.5
x = np.arange(num_weeks + 1) - 0.5
mesh = ax.pcolormesh(x, y, heatmap, cmap=COLORS, norm=NORM, rasterized=True, **kwargs)
ax.invert_yaxis()
ax.set_xticks(list(ticks.keys()))
ax.set_xticklabels(list(ticks.values()))
ax.set_yticks(np.arange(7))
ax.set_yticklabels(DAYS)
plt.sca(ax)
plt.sci(mesh)
return ax
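# Hedged usage sketch (not part of the original script): a year of random integer "clusters"
# rendered with the module-level COLORS/NORM settings.
def _demo_date_heatmap() -> plt.Axes:
    idx = pd.date_range("2021-01-01", "2021-12-31", freq="D")
    demo_series = pd.Series(np.random.randint(0, 31, size=idx.size), index=idx)
    _, demo_ax = plt.subplots()
    return date_heatmap(demo_series, ax=demo_ax, edgecolor="black")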
def plot_cal(ax: plt.Axes,
year_rom: pd.DataFrame,
_: pd.DataFrame,
**kwargs) -> None:
"""
    Plot a calendar heatmap of the cluster assignment for one ROM sample.
    @In: ax, plt.Axes, current axes to plot data with.
    @In: year_rom, pd.DataFrame, ARMA output for the specified year
    @In: _, pd.DataFrame, Original EPRI data, unneeded for this plot
    @In: kwargs, dict, extra options
@Out: None
"""
series = (
year_rom.query("RAVEN_sample_ID == 0")[["TIMESTAMP", "CLUSTER"]]
.set_index("TIMESTAMP")
.squeeze()
)
ax = date_heatmap(series, ax=ax, edgecolor='black')
# colorbar = plt.colorbar(
# orientation='horizontal', drawedges=True, pad=0.25,
# fraction=0.9
# )
# colorbar.set_ticks(np.arange(0, 31, 1) + 0.5)
# colorbar.set_ticklabels(np.arange(0, 31, 1))
# colorbar.outline.set_linewidth(2) # type: ignore
# colorbar.dividers.set_linewidth(2) # type: ignore
# ax.set_aspect('equal')
def plot_ac(ax, _, epri_dat, **kwargs):
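    """
    Plot the autocorrelation function of the original data.
    Assumes sm is statsmodels.api, imported earlier in this script.
    @In: ax, plt.Axes, current axes to plot data with.
    @In: _, pd.DataFrame, ARMA output unneeded for this plot.
    @In: epri_dat, pd.DataFrame, Original EPRI data for specified year.
    @Out: None
    """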
sm.graphics.tsa.plot_acf(epri_dat.TOTALLOAD.squeeze(), lags=40, ax=ax)
ax.set_xlabel('Lag')
def transform_data(file_path: Union[Path, pd.DataFrame], **kwargs) -> pd.DataFrame:
"""
Add datetime column to a dataframe.
    @In: file_path, Path or pd.DataFrame, either a path to read data from or an existing dataframe to modify.
@Out: pd.DataFrame, transformed dataframe.
"""
# TODO: probably not good practice to anticipate a file_path or dataframe.
if isinstance(file_path, pd.DataFrame):
df = file_path
else:
df =
| pd.read_csv(file_path, engine='c', memory_map=True) | pandas.read_csv |
import pandas as pd
import pytest
from rdtools.normalization import normalize_with_expected_power
from pandas import Timestamp
import numpy as np
@pytest.fixture()
def times_15():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='15T')
@pytest.fixture()
def times_30():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='30T')
@pytest.fixture()
def pv_15(times_15):
return pd.Series([1.0, 2.5, 3.0, 2.2, 2.1], index=times_15)
@pytest.fixture()
def expected_15(times_15):
return pd.Series([1.2, 2.3, 2.8, 2.1, 2.0], index=times_15)
@pytest.fixture()
def irradiance_15(times_15):
return pd.Series([1000.0, 850.0, 950.0, 975.0, 890.0], index=times_15)
@pytest.fixture()
def pv_30(times_30):
return pd.Series([1.0, 3.0, 2.1], index=times_30)
@pytest.fixture()
def expected_30(times_30):
return pd.Series([1.2, 2.8, 2.0], index=times_30)
@pytest.fixture()
def irradiance_30(times_30):
return pd.Series([1000.0, 950.0, 890.0], index=times_30)
def test_normalize_with_expected_power_uniform_frequency(pv_15, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_15, expected_15, irradiance_15)
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:15:00', freq='15T'): 1.0,
Timestamp('2020-01-01 12:30:00', freq='15T'): 1.0784313725490198,
Timestamp('2020-01-01 12:45:00', freq='15T'): 1.0612244897959184,
Timestamp('2020-01-01 13:00:00', freq='15T'): 1.0487804878048783}
)
expected_norm.name = 'energy_Wh'
expected_norm.index.freq = '15T'
expected_insol = pd.Series(
{Timestamp('2020-01-01 12:15:00', freq='15T'): 231.25,
Timestamp('2020-01-01 12:30:00', freq='15T'): 225.0,
Timestamp('2020-01-01 12:45:00', freq='15T'): 240.625,
Timestamp('2020-01-01 13:00:00', freq='15T'): 233.125}
)
expected_insol.name = 'energy_Wh'
expected_insol.index.freq = '15T'
pd.testing.assert_series_equal(norm, expected_norm)
pd.testing.assert_series_equal(insol, expected_insol)
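# Informal arithmetic check of the fixture values above (not part of rdtools itself): with
# trapezoidal integration over each 15-minute step, the first normalized point is
# ((1.0 + 2.5) / 2) / ((1.2 + 2.3) / 2) = 1.0 and the first insolation value is
# (1000.0 + 850.0) / 2 * 0.25 h = 231.25, matching expected_norm and expected_insol.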
def test_normalize_with_expected_power_energy_option(pv_15, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_15, expected_15, irradiance_15, pv_input='energy')
expected_norm = pd.Series(
{
| Timestamp('2020-01-01 12:00:00', freq='15T') | pandas.Timestamp |
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import datetime as dt
import pandas as pd
import numpy as np
from pmprophet.model import PMProphet, Sampler
import requests
from newsapi import NewsApiClient
import json
urlConfirmed = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv"
urlRecovered = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv"
urlDeceased = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv"
def getData(url):
    '''Get a case-number time series from the Johns Hopkins data.
Input:
======
url: string with url address of raw data
Output:
=======
recovered:
pandas dataframe
columns = ds, Mainland China, Outside Mainland China
'''
# data time series
df =
| pd.read_csv(url) | pandas.read_csv |
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
is_period, is_period_dtype, is_string_dtype)
from pandas.core.dtypes.dtypes import (
CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry)
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, IntervalIndex, Series, date_range)
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@pytest.fixture(params=[True, False, None])
def ordered(request):
return request.param
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert not len(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert result == self.dtype
def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'category')
assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
with pytest.raises(TypeError, match=msg):
CategoricalDtype("category")
dtype1 = CategoricalDtype(['a', 'b'], ordered=True)
dtype2 = CategoricalDtype(['x', 'y'], ordered=False)
c = Categorical([0, 1], dtype=dtype1, fastpath=True)
@pytest.mark.parametrize('values, categories, ordered, dtype, expected',
[
[None, None, None, None,
CategoricalDtype()],
[None, ['a', 'b'], True, None, dtype1],
[c, None, None, dtype2, dtype2],
[c, ['x', 'y'], False, None, dtype2],
])
def test_from_values_or_dtype(
self, values, categories, ordered, dtype, expected):
result = CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
assert result == expected
@pytest.mark.parametrize('values, categories, ordered, dtype', [
[None, ['a', 'b'], True, dtype2],
[None, ['a', 'b'], None, dtype2],
[None, None, True, dtype2],
])
def test_from_values_or_dtype_raises(self, values, categories,
ordered, dtype):
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
def test_is_dtype(self):
assert CategoricalDtype.is_dtype(self.dtype)
assert CategoricalDtype.is_dtype('category')
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Series(factor, name='A')
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, 'a'), (2, 'b'), (3, 'c')]
result = CategoricalDtype(categories)
assert all(result.categories == categories)
@pytest.mark.parametrize("categories, expected", [
([True, False], True),
([True, False, None], True),
([True, False, "a", "b'"], False),
([0, 1], False),
])
def test_is_boolean(self, categories, expected):
cat = Categorical(categories)
assert cat.dtype._is_boolean is expected
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
class TestDatetimeTZDtype(Base):
def create(self):
return DatetimeTZDtype('ns', 'US/Eastern')
def test_alias_to_unit_raises(self):
# 23990
with tm.assert_produces_warning(FutureWarning):
DatetimeTZDtype('datetime64[ns, US/Central]')
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('this is a bad string')
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('datetime64[ns, US/NotATZ]')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
pytest.raises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype.construct_from_string('datetime64[ns, CET]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_compat(self):
assert is_datetime64tz_dtype(self.dtype)
assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_any_dtype(self.dtype)
assert is_datetime64_any_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_ns_dtype(self.dtype)
assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('datetime64[ns, US/Eastern]')
def test_construction_from_string(self):
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, result)
pytest.raises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
DatetimeTZDtype.construct_from_string('datetime64[ns, notatz]')
with pytest.raises(TypeError,
match="^Could not construct DatetimeTZDtype$"):
DatetimeTZDtype.construct_from_string(['datetime64[ns, notatz]'])
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(self.dtype)
assert DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]')
assert not DatetimeTZDtype.is_dtype('foo')
assert DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns', 'US/Pacific'))
assert not DatetimeTZDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'US/Eastern'))
assert not is_dtype_equal(self.dtype, 'foo')
assert not is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'CET'))
assert not is_dtype_equal(DatetimeTZDtype('ns', 'US/Eastern'),
DatetimeTZDtype('ns', 'US/Pacific'))
# numpy compat
assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
def test_basic(self):
assert is_datetime64tz_dtype(self.dtype)
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr, name='A')
# dtypes
assert is_datetime64tz_dtype(s.dtype)
assert is_datetime64tz_dtype(s)
assert not is_datetime64tz_dtype(np.dtype('float64'))
assert not is_datetime64tz_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s)
assert is_datetimetz(s.dtype)
assert not is_datetimetz(np.dtype('float64'))
assert not is_datetimetz(1.0)
def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Series(dr1, name='A')
assert is_datetime64tz_dtype(s1)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s1)
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Series(dr2, name='A')
assert is_datetime64tz_dtype(s2)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s2)
assert s1.dtype == s2.dtype
@pytest.mark.parametrize('tz', ['UTC', 'US/Eastern'])
@pytest.mark.parametrize('constructor', ['M8', 'datetime64'])
def test_parser(self, tz, constructor):
# pr #11245
dtz_str = '{con}[ns, {tz}]'.format(con=constructor, tz=tz)
result = DatetimeTZDtype.construct_from_string(dtz_str)
expected = DatetimeTZDtype('ns', tz)
assert result == expected
def test_empty(self):
with pytest.raises(TypeError, match="A 'tz' is required."):
DatetimeTZDtype()
class TestPeriodDtype(Base):
def create(self):
return PeriodDtype('D')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = PeriodDtype('D')
dtype3 = PeriodDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
def test_construction(self):
with pytest.raises(ValueError):
PeriodDtype('xx')
for s in ['period[D]', 'Period[D]', 'D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day()
assert is_period_dtype(dt)
for s in ['period[3D]', 'Period[3D]', '3D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day(3)
assert is_period_dtype(dt)
for s in ['period[26H]', 'Period[26H]', '26H',
'period[1D2H]', 'Period[1D2H]', '1D2H']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Hour(26)
assert is_period_dtype(dt)
def test_subclass(self):
a = PeriodDtype('period[D]')
b = PeriodDtype('period[3D]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_identity(self):
assert PeriodDtype('period[D]') == PeriodDtype('period[D]')
assert PeriodDtype('period[D]') is PeriodDtype('period[D]')
assert PeriodDtype('period[3D]') == PeriodDtype('period[3D]')
assert PeriodDtype('period[3D]') is PeriodDtype('period[3D]')
assert PeriodDtype('period[1S1U]') == PeriodDtype('period[1000001U]')
assert PeriodDtype('period[1S1U]') is PeriodDtype('period[1000001U]')
def test_compat(self):
assert not is_datetime64_ns_dtype(self.dtype)
assert not is_datetime64_ns_dtype('period[D]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('period[D]')
def test_construction_from_string(self):
result = PeriodDtype('period[D]')
assert is_dtype_equal(self.dtype, result)
result = PeriodDtype.construct_from_string('period[D]')
assert is_dtype_equal(self.dtype, result)
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('period[foo]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo[D]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')
def test_is_dtype(self):
assert PeriodDtype.is_dtype(self.dtype)
assert PeriodDtype.is_dtype('period[D]')
assert PeriodDtype.is_dtype('period[3D]')
assert PeriodDtype.is_dtype(PeriodDtype('3D'))
assert PeriodDtype.is_dtype('period[U]')
assert PeriodDtype.is_dtype('period[S]')
assert PeriodDtype.is_dtype(PeriodDtype('U'))
assert PeriodDtype.is_dtype(PeriodDtype('S'))
assert not PeriodDtype.is_dtype('D')
assert not PeriodDtype.is_dtype('3D')
assert not PeriodDtype.is_dtype('U')
assert not PeriodDtype.is_dtype('S')
assert not PeriodDtype.is_dtype('foo')
assert not PeriodDtype.is_dtype(np.object_)
assert not PeriodDtype.is_dtype(np.int64)
assert not PeriodDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'period[D]')
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(PeriodDtype('D'), PeriodDtype('D'))
assert not is_dtype_equal(self.dtype, 'D')
assert not is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
def test_basic(self):
assert is_period_dtype(self.dtype)
pidx = pd.period_range('2013-01-01 09:00', periods=5, freq='H')
assert is_period_dtype(pidx.dtype)
assert is_period_dtype(pidx)
with tm.assert_produces_warning(FutureWarning):
assert is_period(pidx)
s = Series(pidx, name='A')
assert is_period_dtype(s.dtype)
assert is_period_dtype(s)
with tm.assert_produces_warning(FutureWarning):
assert is_period(s)
assert not is_period_dtype(np.dtype('float64'))
assert not is_period_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert not is_period(np.dtype('float64'))
with tm.assert_produces_warning(FutureWarning):
assert not is_period(1.0)
def test_empty(self):
dt = PeriodDtype()
with pytest.raises(AttributeError):
str(dt)
def test_not_string(self):
# though PeriodDtype has object kind, it cannot be string
assert not is_string_dtype(PeriodDtype('D'))
class TestIntervalDtype(Base):
def create(self):
return IntervalDtype('int64')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = IntervalDtype('int64')
dtype3 = IntervalDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype3
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype1 = IntervalDtype('interval')
dtype2 = IntervalDtype(dtype1)
dtype3 = IntervalDtype('interval')
assert dtype2 == dtype1
assert dtype2 == dtype2
assert dtype2 == dtype3
assert dtype2 is dtype1
assert dtype2 is dtype2
assert dtype2 is dtype3
assert hash(dtype2) == hash(dtype1)
assert hash(dtype2) == hash(dtype2)
assert hash(dtype2) == hash(dtype3)
@pytest.mark.parametrize('subtype', [
'interval[int64]', 'Interval[int64]', 'int64', np.dtype('int64')])
def test_construction(self, subtype):
i = IntervalDtype(subtype)
assert i.subtype == np.dtype('int64')
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_construction_generic(self, subtype):
# generic
i = IntervalDtype(subtype)
assert i.subtype is None
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [
CategoricalDtype(list('abc'), False),
CategoricalDtype(list('wxyz'), True),
object, str, '<U10', 'interval[category]', 'interval[object]'])
def test_construction_not_supported(self, subtype):
# GH 19016
msg = ('category, object, and string subtypes are not supported '
'for IntervalDtype')
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
@pytest.mark.parametrize('subtype', ['xx', 'IntervalA', 'Interval[foo]'])
def test_construction_errors(self, subtype):
msg = 'could not construct IntervalDtype'
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
def test_construction_from_string(self):
result = IntervalDtype('interval[int64]')
assert is_dtype_equal(self.dtype, result)
result = IntervalDtype.construct_from_string('interval[int64]')
assert is_dtype_equal(self.dtype, result)
@pytest.mark.parametrize('string', [
0, 3.14, ('a', 'b'), None])
def test_construction_from_string_errors(self, string):
# these are invalid entirely
msg = 'a string needs to be passed, got type'
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
@pytest.mark.parametrize('string', [
'foo', 'foo[int64]', 'IntervalA'])
def test_construction_from_string_error_subtype(self, string):
# this is an invalid subtype
msg = ("Incorrectly formatted string passed to constructor. "
r"Valid formats include Interval or Interval\[dtype\] "
"where dtype is numeric, datetime, or timedelta")
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
def test_subclass(self):
a = IntervalDtype('interval[int64]')
b = IntervalDtype('interval[int64]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_is_dtype(self):
assert IntervalDtype.is_dtype(self.dtype)
assert IntervalDtype.is_dtype('interval')
assert IntervalDtype.is_dtype(IntervalDtype('float64'))
assert IntervalDtype.is_dtype(IntervalDtype('int64'))
assert IntervalDtype.is_dtype(IntervalDtype(np.int64))
assert not IntervalDtype.is_dtype('D')
assert not IntervalDtype.is_dtype('3D')
assert not IntervalDtype.is_dtype('U')
assert not IntervalDtype.is_dtype('S')
assert not IntervalDtype.is_dtype('foo')
assert not IntervalDtype.is_dtype('IntervalA')
assert not IntervalDtype.is_dtype(np.object_)
assert not IntervalDtype.is_dtype(np.int64)
assert not IntervalDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'interval[int64]')
assert is_dtype_equal(self.dtype, IntervalDtype('int64'))
assert is_dtype_equal(IntervalDtype('int64'), IntervalDtype('int64'))
assert not is_dtype_equal(self.dtype, 'int64')
assert not is_dtype_equal(IntervalDtype('int64'),
IntervalDtype('float64'))
# invalid subtype comparisons do not raise when directly compared
dtype1 = IntervalDtype('float64')
dtype2 = IntervalDtype('datetime64[ns, US/Eastern]')
assert dtype1 != dtype2
assert dtype2 != dtype1
@pytest.mark.parametrize('subtype', [
None, 'interval', 'Interval', 'int64', 'uint64', 'float64',
'complex128', 'datetime64', 'timedelta64', PeriodDtype('Q')])
def test_equality_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert is_dtype_equal(dtype, 'interval')
assert is_dtype_equal(dtype, IntervalDtype())
@pytest.mark.parametrize('subtype', [
'int64', 'uint64', 'float64', 'complex128', 'datetime64',
'timedelta64', PeriodDtype('Q')])
def test_name_repr(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
expected = 'interval[{subtype}]'.format(subtype=subtype)
assert str(dtype) == expected
assert dtype.name == 'interval'
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_name_repr_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert str(dtype) == 'interval'
assert dtype.name == 'interval'
def test_basic(self):
assert is_interval_dtype(self.dtype)
ii = IntervalIndex.from_breaks(range(3))
assert is_interval_dtype(ii.dtype)
assert is_interval_dtype(ii)
s = Series(ii, name='A')
assert is_interval_dtype(s.dtype)
assert is_interval_dtype(s)
def test_basic_dtype(self):
assert is_interval_dtype('interval[int64]')
assert is_interval_dtype(IntervalIndex.from_tuples([(0, 1)]))
assert is_interval_dtype(IntervalIndex.from_breaks(np.arange(4)))
assert is_interval_dtype(IntervalIndex.from_breaks(
date_range('20130101', periods=3)))
assert not is_interval_dtype('U')
assert not is_interval_dtype('S')
assert not is_interval_dtype('foo')
assert not is_interval_dtype(np.object_)
assert not is_interval_dtype(np.int64)
assert not is_interval_dtype(np.float64)
def test_caching(self):
IntervalDtype.reset_cache()
dtype = IntervalDtype("int64")
assert len(IntervalDtype._cache) == 1
IntervalDtype("interval")
assert len(IntervalDtype._cache) == 2
IntervalDtype.reset_cache()
tm.round_trip_pickle(dtype)
assert len(IntervalDtype._cache) == 0
class TestCategoricalDtypeParametrized(object):
@pytest.mark.parametrize('categories', [
list('abcd'),
np.arange(1000),
['a', 'b', 10, 2, 1.3, True],
[True, False],
pd.date_range('2017', periods=4)])
def test_basic(self, categories, ordered):
c1 = CategoricalDtype(categories, ordered=ordered)
tm.assert_index_equal(c1.categories, pd.Index(categories))
assert c1.ordered is ordered
def test_order_matters(self):
categories = ['a', 'b']
c1 = CategoricalDtype(categories, ordered=True)
c2 = CategoricalDtype(categories, ordered=False)
c3 = CategoricalDtype(categories, ordered=None)
assert c1 is not c2
assert c1 is not c3
@pytest.mark.parametrize('ordered', [False, None])
def test_unordered_same(self, ordered):
c1 = CategoricalDtype(['a', 'b'], ordered=ordered)
c2 = CategoricalDtype(['b', 'a'], ordered=ordered)
assert hash(c1) == hash(c2)
def test_categories(self):
result = CategoricalDtype(['a', 'b', 'c'])
tm.assert_index_equal(result.categories, pd.Index(['a', 'b', 'c']))
assert result.ordered is None
def test_equal_but_different(self, ordered):
c1 = CategoricalDtype([1, 2, 3])
c2 = CategoricalDtype([1., 2., 3.])
assert c1 is not c2
assert c1 != c2
@pytest.mark.parametrize('v1, v2', [
([1, 2, 3], [1, 2, 3]),
([1, 2, 3], [3, 2, 1]),
])
def test_order_hashes_different(self, v1, v2):
c1 = CategoricalDtype(v1, ordered=False)
c2 = CategoricalDtype(v2, ordered=True)
c3 = CategoricalDtype(v1, ordered=None)
assert c1 is not c2
assert c1 is not c3
def test_nan_invalid(self):
with pytest.raises(ValueError):
CategoricalDtype([1, 2, np.nan])
def test_non_unique_invalid(self):
with pytest.raises(ValueError):
CategoricalDtype([1, 2, 1])
def test_same_categories_different_order(self):
c1 = CategoricalDtype(['a', 'b'], ordered=True)
c2 = CategoricalDtype(['b', 'a'], ordered=True)
assert c1 is not c2
@pytest.mark.parametrize('ordered1', [True, False, None])
@pytest.mark.parametrize('ordered2', [True, False, None])
def test_categorical_equality(self, ordered1, ordered2):
# same categories, same order
# any combination of None/False are equal
# True/True is the only combination with True that are equal
c1 = CategoricalDtype(list('abc'), ordered1)
c2 = CategoricalDtype(list('abc'), ordered2)
result = c1 == c2
expected = bool(ordered1) is bool(ordered2)
assert result is expected
# same categories, different order
# any combination of None/False are equal (order doesn't matter)
# any combination with True are not equal (different order of cats)
c1 = CategoricalDtype(list('abc'), ordered1)
c2 = CategoricalDtype(list('cab'), ordered2)
result = c1 == c2
expected = (bool(ordered1) is False) and (bool(ordered2) is False)
assert result is expected
# different categories
c2 = CategoricalDtype([1, 2, 3], ordered2)
assert c1 != c2
# none categories
c1 = CategoricalDtype(list('abc'), ordered1)
c2 = CategoricalDtype(None, ordered2)
c3 = CategoricalDtype(None, ordered1)
assert c1 == c2
assert c2 == c1
assert c2 == c3
@pytest.mark.parametrize('categories', [list('abc'), None])
@pytest.mark.parametrize('other', ['category', 'not a category'])
def test_categorical_equality_strings(self, categories, ordered, other):
c1 = CategoricalDtype(categories, ordered)
result = c1 == other
expected = other == 'category'
assert result is expected
def test_invalid_raises(self):
with pytest.raises(TypeError, match='ordered'):
CategoricalDtype(['a', 'b'], ordered='foo')
with pytest.raises(TypeError, match="'categories' must be list-like"):
CategoricalDtype('category')
def test_mixed(self):
a = CategoricalDtype(['a', 'b', 1, 2])
b = CategoricalDtype(['a', 'b', '1', '2'])
assert hash(a) != hash(b)
def test_from_categorical_dtype_identity(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# Identity test for no changes
c2 = CategoricalDtype._from_categorical_dtype(c1)
assert c2 is c1
def test_from_categorical_dtype_categories(self):
c1 =
| Categorical([1, 2], categories=[1, 2, 3], ordered=True) | pandas.Categorical |
# exotol
import joblib
import re
import numpy as np
import pandas as pd
import scipy
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import SVC
from string import punctuation
from solvers.solver_helpers import AbstractSolver
def clean(text, remove_stop_words=True):
text = re.sub(r"\d+\)", "", text)
text = re.sub(r"\(\d+\)", "", text)
text = re.sub(r"др.", "", text)
text = re.sub(r"<...>|<…>", "", text)
text = re.sub(r"<....>|\( ..... \)|\( ... \)", "", text)
text = re.sub(r"«|—|»|iii|…|xiv", "", text)
text = " ".join([c for c in word_tokenize(text) if c not in punctuation])
if remove_stop_words:
text = text.split()
        text = [w.lower().strip() for w in text if w not in stopwords.words("russian")]
text = " ".join(text)
return text
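# Hedged usage sketch (output is indicative only; it depends on the installed NLTK Russian
# stopword list and tokenizer data):
#   clean("1) Пример текста <...> и др.")  # -> "пример текста"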
class Solver(AbstractSolver):
def __init__(self):
self.clf = SVC(kernel="linear", probability=True)
self.count_vec = CountVectorizer(
analyzer="word", token_pattern=r"\w{1,}", ngram_range=(1, 3)
)
def transform_vec(self, train, test=None, type_transform="count_vec"):
if type_transform == "count_vec":
self.count_vec.fit(pd.concat((train["pq1"], train["pq2"])).unique())
trainq1_trans = self.count_vec.transform(train["pq1"].values)
trainq2_trans = self.count_vec.transform(train["pq2"].values)
X_train = scipy.sparse.hstack((trainq1_trans, trainq2_trans))
if "target" not in train.columns:
return X_train
y_train = train["target"].values
if not (test is None):
trainq1_trans = self.count_vec.transform(test["pq1"].values)
trainq2_trans = self.count_vec.transform(test["pq2"].values)
labels = test["target"].values
X_valid = scipy.sparse.hstack((trainq1_trans, trainq2_trans))
y_valid = labels
return X_train, y_train, X_valid, y_valid
return X_train, y_train
def fit(self, tasks):
train = self.create_dataset(tasks, is_train=True)
train["pq1"] = train["q1"].apply(clean)
train["pq2"] = train["q2"].apply(clean)
X_train, y_train = self.transform_vec(train, None, type_transform="count_vec")
self.clf.fit(X_train, y_train)
def load(self, path="data/models/solvers/solver22/solver22.pkl"):
model = joblib.load(path)
self.clf = model["classifier"]
self.count_vec = model["count_vec"]
self.is_loaded = True
def save(self, path="data/models/solvers/solver22/solver22.pkl"):
model = {"classifier": self.clf, "count_vec": self.count_vec}
joblib.dump(model, path)
def create_dataset(self, tasks, is_train=True):
data = []
if is_train:
for task in tasks:
if "correct_variants" in task["solution"]:
solution = task["solution"]["correct_variants"][0]
if "correct" in task["solution"]:
solution = task["solution"]["correct"]
_tmp = []
for choice in task["question"]["choices"]:
row = [
choice["id"],
task["text"],
choice["text"],
1 if choice["id"] in solution else 0,
]
_tmp.append(choice["text"])
data.append(row)
return pd.DataFrame(data, columns=["id", "q1", "q2", "target"])
else:
for task in tasks:
_tmp = []
for choice in task["question"]["choices"]:
row = [choice["id"], task["text"], choice["text"]]
_tmp.append(choice["text"])
data.append(row)
return
| pd.DataFrame(data, columns=["id", "q1", "q2"]) | pandas.DataFrame |